body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
ff3371be03da802ccc87eeb31d182c0f0ec01a3f776dd711ac77f91916cced0d
def sns_heatmap(self, data2d, data_type): "\n Heatmap of 2d data with data annotated \n\n Parameters\n ----------\n data2d : data in 2d representation\n \n data_type : type of data to be annotated - 'int', 'float'\n \n fig_size: tuple, define figure size\n\n Returns\n -------\n None.\n\n eg. \n flights=sns.load_dataset('flights').pivot('month','year','passengers')\n myplt.sns_heatmap(flights,'int')\n " data_string = {'int': 'd', 'float': '0.1f'} sns.heatmap(data2d, annot=True, linewidths=0.5, fmt=data_string[data_type]) plt.show()
Heatmap of 2d data with data annotated Parameters ---------- data2d : data in 2d representation data_type : type of data to be annotated - 'int', 'float' fig_size: tuple, define figure size Returns ------- None. eg. flights=sns.load_dataset('flights').pivot('month','year','passengers') myplt.sns_heatmap(flights,'int')
plotting_utils.py
sns_heatmap
abishek85/plotting_tools
0
python
def sns_heatmap(self, data2d, data_type): "\n Heatmap of 2d data with data annotated \n\n Parameters\n ----------\n data2d : data in 2d representation\n \n data_type : type of data to be annotated - 'int', 'float'\n \n fig_size: tuple, define figure size\n\n Returns\n -------\n None.\n\n eg. \n flights=sns.load_dataset('flights').pivot('month','year','passengers')\n myplt.sns_heatmap(flights,'int')\n " data_string = {'int': 'd', 'float': '0.1f'} sns.heatmap(data2d, annot=True, linewidths=0.5, fmt=data_string[data_type]) plt.show()
def sns_heatmap(self, data2d, data_type): "\n Heatmap of 2d data with data annotated \n\n Parameters\n ----------\n data2d : data in 2d representation\n \n data_type : type of data to be annotated - 'int', 'float'\n \n fig_size: tuple, define figure size\n\n Returns\n -------\n None.\n\n eg. \n flights=sns.load_dataset('flights').pivot('month','year','passengers')\n myplt.sns_heatmap(flights,'int')\n " data_string = {'int': 'd', 'float': '0.1f'} sns.heatmap(data2d, annot=True, linewidths=0.5, fmt=data_string[data_type]) plt.show()<|docstring|>Heatmap of 2d data with data annotated Parameters ---------- data2d : data in 2d representation data_type : type of data to be annotated - 'int', 'float' fig_size: tuple, define figure size Returns ------- None. eg. flights=sns.load_dataset('flights').pivot('month','year','passengers') myplt.sns_heatmap(flights,'int')<|endoftext|>
afb793745cc553d45585392c62fd446c352d39625d7edecda593ba64347aa1ea
@pytest.fixture(name='supervisor') def mock_supervisor_fixture(): 'Mock Supervisor.' with patch('homeassistant.components.hassio.is_hassio', return_value=True): (yield)
Mock Supervisor.
tests/components/ozw/test_config_flow.py
mock_supervisor_fixture
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='supervisor') def mock_supervisor_fixture(): with patch('homeassistant.components.hassio.is_hassio', return_value=True): (yield)
@pytest.fixture(name='supervisor') def mock_supervisor_fixture(): with patch('homeassistant.components.hassio.is_hassio', return_value=True): (yield)<|docstring|>Mock Supervisor.<|endoftext|>
152a39aa44b7feb6436d4742a97815cb6bf0d92ee99128af47d176cdf56737c3
@pytest.fixture(name='addon_info') def mock_addon_info(): 'Mock Supervisor add-on info.' with patch('homeassistant.components.hassio.async_get_addon_info') as addon_info: addon_info.return_value = {} (yield addon_info)
Mock Supervisor add-on info.
tests/components/ozw/test_config_flow.py
mock_addon_info
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='addon_info') def mock_addon_info(): with patch('homeassistant.components.hassio.async_get_addon_info') as addon_info: addon_info.return_value = {} (yield addon_info)
@pytest.fixture(name='addon_info') def mock_addon_info(): with patch('homeassistant.components.hassio.async_get_addon_info') as addon_info: addon_info.return_value = {} (yield addon_info)<|docstring|>Mock Supervisor add-on info.<|endoftext|>
420e3db35dd6c7554c7ddc5bb21f6c061b9cbc70aa9bfb59f9cabae1c1312414
@pytest.fixture(name='addon_running') def mock_addon_running(addon_info): 'Mock add-on already running.' addon_info.return_value['state'] = 'started' return addon_info
Mock add-on already running.
tests/components/ozw/test_config_flow.py
mock_addon_running
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='addon_running') def mock_addon_running(addon_info): addon_info.return_value['state'] = 'started' return addon_info
@pytest.fixture(name='addon_running') def mock_addon_running(addon_info): addon_info.return_value['state'] = 'started' return addon_info<|docstring|>Mock add-on already running.<|endoftext|>
5d389d0274c9c6e0054e62902d587cf8189f4fe72f16a403e28c6e6284e41a5d
@pytest.fixture(name='addon_installed') def mock_addon_installed(addon_info): 'Mock add-on already installed but not running.' addon_info.return_value['state'] = 'stopped' addon_info.return_value['version'] = '1.0' return addon_info
Mock add-on already installed but not running.
tests/components/ozw/test_config_flow.py
mock_addon_installed
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='addon_installed') def mock_addon_installed(addon_info): addon_info.return_value['state'] = 'stopped' addon_info.return_value['version'] = '1.0' return addon_info
@pytest.fixture(name='addon_installed') def mock_addon_installed(addon_info): addon_info.return_value['state'] = 'stopped' addon_info.return_value['version'] = '1.0' return addon_info<|docstring|>Mock add-on already installed but not running.<|endoftext|>
3cb32aaea51c540cd61eeca43175c799a3b520183cc9132219fcf18abf11bcf9
@pytest.fixture(name='addon_options') def mock_addon_options(addon_info): 'Mock add-on options.' addon_info.return_value['options'] = {} return addon_info.return_value['options']
Mock add-on options.
tests/components/ozw/test_config_flow.py
mock_addon_options
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='addon_options') def mock_addon_options(addon_info): addon_info.return_value['options'] = {} return addon_info.return_value['options']
@pytest.fixture(name='addon_options') def mock_addon_options(addon_info): addon_info.return_value['options'] = {} return addon_info.return_value['options']<|docstring|>Mock add-on options.<|endoftext|>
33cdfab75aaa8a6c2cee477d56b83c4626cfc45c985a31b478ea18e709564c8f
@pytest.fixture(name='set_addon_options') def mock_set_addon_options(): 'Mock set add-on options.' with patch('homeassistant.components.hassio.async_set_addon_options') as set_options: (yield set_options)
Mock set add-on options.
tests/components/ozw/test_config_flow.py
mock_set_addon_options
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='set_addon_options') def mock_set_addon_options(): with patch('homeassistant.components.hassio.async_set_addon_options') as set_options: (yield set_options)
@pytest.fixture(name='set_addon_options') def mock_set_addon_options(): with patch('homeassistant.components.hassio.async_set_addon_options') as set_options: (yield set_options)<|docstring|>Mock set add-on options.<|endoftext|>
5ffc8b39611a282a17d6dd67c008df17f18e5dc06cfa17df9b9a0ebd63d8d1ae
@pytest.fixture(name='install_addon') def mock_install_addon(): 'Mock install add-on.' with patch('homeassistant.components.hassio.async_install_addon') as install_addon: (yield install_addon)
Mock install add-on.
tests/components/ozw/test_config_flow.py
mock_install_addon
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='install_addon') def mock_install_addon(): with patch('homeassistant.components.hassio.async_install_addon') as install_addon: (yield install_addon)
@pytest.fixture(name='install_addon') def mock_install_addon(): with patch('homeassistant.components.hassio.async_install_addon') as install_addon: (yield install_addon)<|docstring|>Mock install add-on.<|endoftext|>
cc1f1901282f9892ad62188a65067b80d2f371a2e8e4c285af7d45a0164b6b04
@pytest.fixture(name='start_addon') def mock_start_addon(): 'Mock start add-on.' with patch('homeassistant.components.hassio.async_start_addon') as start_addon: (yield start_addon)
Mock start add-on.
tests/components/ozw/test_config_flow.py
mock_start_addon
GrandMoff100/homeassistant-core
22,481
python
@pytest.fixture(name='start_addon') def mock_start_addon(): with patch('homeassistant.components.hassio.async_start_addon') as start_addon: (yield start_addon)
@pytest.fixture(name='start_addon') def mock_start_addon(): with patch('homeassistant.components.hassio.async_start_addon') as start_addon: (yield start_addon)<|docstring|>Mock start add-on.<|endoftext|>
43d88906f6f5c0b5436b5022550b4a8532a29d4495bef16b7f907bc36d7b45f8
async def test_user_not_supervisor_create_entry(hass, mqtt): 'Test the user step creates an entry not on Supervisor.' with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test the user step creates an entry not on Supervisor.
tests/components/ozw/test_config_flow.py
test_user_not_supervisor_create_entry
GrandMoff100/homeassistant-core
22,481
python
async def test_user_not_supervisor_create_entry(hass, mqtt): with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_user_not_supervisor_create_entry(hass, mqtt): with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test the user step creates an entry not on Supervisor.<|endoftext|>
8b9ae2c855106a4b9053dab56b1cd90725b399e467a9b740e98abaa9bf73134b
async def test_mqtt_not_setup(hass): 'Test that mqtt is required.' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'mqtt_required')
Test that mqtt is required.
tests/components/ozw/test_config_flow.py
test_mqtt_not_setup
GrandMoff100/homeassistant-core
22,481
python
async def test_mqtt_not_setup(hass): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'mqtt_required')
async def test_mqtt_not_setup(hass): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'mqtt_required')<|docstring|>Test that mqtt is required.<|endoftext|>
f36a0f0cfa35fdb75220a3fbc36ff7ca9a7ee971ca75572c62be5812d5b24228
async def test_one_instance_allowed(hass): 'Test that only one instance is allowed.' entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'single_instance_allowed')
Test that only one instance is allowed.
tests/components/ozw/test_config_flow.py
test_one_instance_allowed
GrandMoff100/homeassistant-core
22,481
python
async def test_one_instance_allowed(hass): entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'single_instance_allowed')
async def test_one_instance_allowed(hass): entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) assert (result['type'] == 'abort') assert (result['reason'] == 'single_instance_allowed')<|docstring|>Test that only one instance is allowed.<|endoftext|>
96919bf500969cc63d12c3fac7efe50ff7c2f03e81cee1c80c68bd8f6e27c5c7
async def test_not_addon(hass, supervisor, mqtt): 'Test opting out of add-on on Supervisor.' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test opting out of add-on on Supervisor.
tests/components/ozw/test_config_flow.py
test_not_addon
GrandMoff100/homeassistant-core
22,481
python
async def test_not_addon(hass, supervisor, mqtt): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_not_addon(hass, supervisor, mqtt): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test opting out of add-on on Supervisor.<|endoftext|>
ee583bdc9306319c691945f167ffb707f11bc7ea9aa3bdd3f7a56a5d6bb9bce2
async def test_addon_running(hass, supervisor, addon_running, addon_options): 'Test add-on already running on Supervisor.' addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test add-on already running on Supervisor.
tests/components/ozw/test_config_flow.py
test_addon_running
GrandMoff100/homeassistant-core
22,481
python
async def test_addon_running(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_addon_running(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test add-on already running on Supervisor.<|endoftext|>
2061ccb8fff2c1b640345d38a5481d2a20b76d17ef714f10cd5672e27036f5d5
async def test_addon_info_failure(hass, supervisor, addon_info): 'Test add-on info failure.' addon_info.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_info_failed')
Test add-on info failure.
tests/components/ozw/test_config_flow.py
test_addon_info_failure
GrandMoff100/homeassistant-core
22,481
python
async def test_addon_info_failure(hass, supervisor, addon_info): addon_info.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_info_failed')
async def test_addon_info_failure(hass, supervisor, addon_info): addon_info.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_info_failed')<|docstring|>Test add-on info failure.<|endoftext|>
5a843dde600b1257be47b227bdc7d0f0967cff1838776206fe853d330fb3b960
async def test_addon_installed(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): 'Test add-on already installed but not running on Supervisor.' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test add-on already installed but not running on Supervisor.
tests/components/ozw/test_config_flow.py
test_addon_installed
GrandMoff100/homeassistant-core
22,481
python
async def test_addon_installed(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_addon_installed(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test add-on already installed but not running on Supervisor.<|endoftext|>
099962a03163ed9f08a23593d3b5b34079776879c7af07a75e40c85ec5d0001c
async def test_set_addon_config_failure(hass, supervisor, addon_installed, addon_options, set_addon_options): 'Test add-on set config failure.' set_addon_options.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_set_config_failed')
Test add-on set config failure.
tests/components/ozw/test_config_flow.py
test_set_addon_config_failure
GrandMoff100/homeassistant-core
22,481
python
async def test_set_addon_config_failure(hass, supervisor, addon_installed, addon_options, set_addon_options): set_addon_options.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_set_config_failed')
async def test_set_addon_config_failure(hass, supervisor, addon_installed, addon_options, set_addon_options): set_addon_options.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_set_config_failed')<|docstring|>Test add-on set config failure.<|endoftext|>
2d2b02370376c60f61a553f229a62108d7d2a749583bff042ded666c855df635
async def test_start_addon_failure(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): 'Test add-on start failure.' start_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'form') assert (result['errors'] == {'base': 'addon_start_failed'})
Test add-on start failure.
tests/components/ozw/test_config_flow.py
test_start_addon_failure
GrandMoff100/homeassistant-core
22,481
python
async def test_start_addon_failure(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): start_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'form') assert (result['errors'] == {'base': 'addon_start_failed'})
async def test_start_addon_failure(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): start_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) assert (result['type'] == 'form') assert (result['errors'] == {'base': 'addon_start_failed'})<|docstring|>Test add-on start failure.<|endoftext|>
16b9a614f08c1f7eefaeb3c611e923b93647e994bd0fab9e6b35208e05376d64
async def test_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options, set_addon_options, start_addon): 'Test add-on not installed.' addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon') with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': True}) assert (len(mock_setup_entry.mock_calls) == 1)
Test add-on not installed.
tests/components/ozw/test_config_flow.py
test_addon_not_installed
GrandMoff100/homeassistant-core
22,481
python
async def test_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options, set_addon_options, start_addon): addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon') with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': True}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options, set_addon_options, start_addon): addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon') with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'usb_path': '/test', 'network_key': 'abc123'})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': True}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test add-on not installed.<|endoftext|>
f514fd2678120a70e9fc84932691a57a26e814aef4837ee6dea823b26f49008c
async def test_install_addon_failure(hass, supervisor, addon_installed, install_addon): 'Test add-on install failure.' addon_installed.return_value['version'] = None install_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_install_failed')
Test add-on install failure.
tests/components/ozw/test_config_flow.py
test_install_addon_failure
GrandMoff100/homeassistant-core
22,481
python
async def test_install_addon_failure(hass, supervisor, addon_installed, install_addon): addon_installed.return_value['version'] = None install_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_install_failed')
async def test_install_addon_failure(hass, supervisor, addon_installed, install_addon): addon_installed.return_value['version'] = None install_addon.side_effect = HassioAPIError() result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': True})) assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'abort') assert (result['reason'] == 'addon_install_failed')<|docstring|>Test add-on install failure.<|endoftext|>
0ea408fdc7a0c0ef80b5a2c3fd6ddb6d213d453fd1873c9e14687c871cbf04ea
async def test_supervisor_discovery(hass, supervisor, addon_running, addon_options): 'Test flow started from Supervisor discovery.' addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test flow started from Supervisor discovery.
tests/components/ozw/test_config_flow.py
test_supervisor_discovery
GrandMoff100/homeassistant-core
22,481
python
async def test_supervisor_discovery(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_supervisor_discovery(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) (await hass.async_block_till_done()) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': '/test', 'network_key': 'abc123', 'use_addon': True, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test flow started from Supervisor discovery.<|endoftext|>
abb5be4cd9760c2894793a46af746015b0cca34f395fa02833e8b43317750caa
async def test_clean_discovery_on_user_create(hass, supervisor, addon_running, addon_options): 'Test discovery flow is cleaned up when a user flow is finished.' addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (len(hass.config_entries.flow.async_progress()) == 0) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
Test discovery flow is cleaned up when a user flow is finished.
tests/components/ozw/test_config_flow.py
test_clean_discovery_on_user_create
GrandMoff100/homeassistant-core
22,481
python
async def test_clean_discovery_on_user_create(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (len(hass.config_entries.flow.async_progress()) == 0) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)
async def test_clean_discovery_on_user_create(hass, supervisor, addon_running, addon_options): addon_options['device'] = '/test' addon_options['network_key'] = 'abc123' result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) with patch('homeassistant.components.ozw.async_setup_entry', return_value=True) as mock_setup_entry: result = (await hass.config_entries.flow.async_configure(result['flow_id'], {'use_addon': False})) (await hass.async_block_till_done()) assert (len(hass.config_entries.flow.async_progress()) == 0) assert (result['type'] == 'create_entry') assert (result['title'] == TITLE) assert (result['data'] == {'usb_path': None, 'network_key': None, 'use_addon': False, 'integration_created_addon': False}) assert (len(mock_setup_entry.mock_calls) == 1)<|docstring|>Test discovery flow is cleaned up when a user flow is finished.<|endoftext|>
a91a15828beffc68d7d8d9f869b6b73e36a57917cfead9b19640a20b34cf4f80
async def test_abort_discovery_with_user_flow(hass, supervisor, addon_running, addon_options): 'Test discovery flow is aborted if a user flow is in progress.' (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_in_progress') assert (len(hass.config_entries.flow.async_progress()) == 1)
Test discovery flow is aborted if a user flow is in progress.
tests/components/ozw/test_config_flow.py
test_abort_discovery_with_user_flow
GrandMoff100/homeassistant-core
22,481
python
async def test_abort_discovery_with_user_flow(hass, supervisor, addon_running, addon_options): (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_in_progress') assert (len(hass.config_entries.flow.async_progress()) == 1)
async def test_abort_discovery_with_user_flow(hass, supervisor, addon_running, addon_options): (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_USER})) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_in_progress') assert (len(hass.config_entries.flow.async_progress()) == 1)<|docstring|>Test discovery flow is aborted if a user flow is in progress.<|endoftext|>
0025da8b0666d36bc05b73c8d20ed8a5cdf1d29e53838a2d467f1813f8c014db
async def test_abort_discovery_with_existing_entry(hass, supervisor, addon_running, addon_options): 'Test discovery flow is aborted if an entry already exists.' entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE, unique_id=DOMAIN) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_configured')
Test discovery flow is aborted if an entry already exists.
tests/components/ozw/test_config_flow.py
test_abort_discovery_with_existing_entry
GrandMoff100/homeassistant-core
22,481
python
async def test_abort_discovery_with_existing_entry(hass, supervisor, addon_running, addon_options): entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE, unique_id=DOMAIN) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_configured')
async def test_abort_discovery_with_existing_entry(hass, supervisor, addon_running, addon_options): entry = MockConfigEntry(domain=DOMAIN, data={}, title=TITLE, unique_id=DOMAIN) entry.add_to_hass(hass) result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['type'] == 'abort') assert (result['reason'] == 'already_configured')<|docstring|>Test discovery flow is aborted if an entry already exists.<|endoftext|>
1ddef9fe1760eee5dea515dfedf86e1f1cc5e27d561ff4cb97d45f9ea11125bc
async def test_discovery_addon_not_running(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): 'Test discovery with add-on already installed but not running.' addon_options['device'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'start_addon') assert (result['type'] == 'form')
Test discovery with add-on already installed but not running.
tests/components/ozw/test_config_flow.py
test_discovery_addon_not_running
GrandMoff100/homeassistant-core
22,481
python
async def test_discovery_addon_not_running(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): addon_options['device'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'start_addon') assert (result['type'] == 'form')
async def test_discovery_addon_not_running(hass, supervisor, addon_installed, addon_options, set_addon_options, start_addon): addon_options['device'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'start_addon') assert (result['type'] == 'form')<|docstring|>Test discovery with add-on already installed but not running.<|endoftext|>
0c59c9bcf28cbeb91c8c90a0130ff53b1393587e276718651ea3550314b38103
async def test_discovery_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options): 'Test discovery with add-on not installed.' addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'install_addon') assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon')
Test discovery with add-on not installed.
tests/components/ozw/test_config_flow.py
test_discovery_addon_not_installed
GrandMoff100/homeassistant-core
22,481
python
async def test_discovery_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options): addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'install_addon') assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon')
async def test_discovery_addon_not_installed(hass, supervisor, addon_installed, install_addon, addon_options): addon_installed.return_value['version'] = None result = (await hass.config_entries.flow.async_init(DOMAIN, context={'source': config_entries.SOURCE_HASSIO}, data=ADDON_DISCOVERY_INFO)) assert (result['step_id'] == 'hassio_confirm') assert (result['type'] == 'form') result = (await hass.config_entries.flow.async_configure(result['flow_id'], {})) assert (result['step_id'] == 'install_addon') assert (result['type'] == 'progress') (await hass.async_block_till_done()) result = (await hass.config_entries.flow.async_configure(result['flow_id'])) assert (result['type'] == 'form') assert (result['step_id'] == 'start_addon')<|docstring|>Test discovery with add-on not installed.<|endoftext|>
61fad87d1a4ee066bb2e749e6a12e827e51c83ac18f08a105a464b8c4e0f69ec
def test_power_button_long_press(self): ' Verify Power Button long press menu\n https://moztrap.mozilla.org/manage/case/1330/\n ' sleep_menu = SleepScreen(self.marionette) self.device.hold_sleep_button() sleep_menu.wait_for_sleep_menu_visible() self.assertEqual(sleep_menu.title, 'Phone') sleep_menu_items = [item.name for item in sleep_menu.menu_items] for item in MENU_ITEMS: self.assertIn(item, sleep_menu_items) sleep_menu.tap_cancel_button() self.assertFalse(sleep_menu.is_menu_visible)
Verify Power Button long press menu https://moztrap.mozilla.org/manage/case/1330/
tests/python/gaia-ui-tests/gaiatest/tests/functional/system/test_power_button_long_press.py
test_power_button_long_press
tauzen/gaia
1
python
def test_power_button_long_press(self): ' Verify Power Button long press menu\n https://moztrap.mozilla.org/manage/case/1330/\n ' sleep_menu = SleepScreen(self.marionette) self.device.hold_sleep_button() sleep_menu.wait_for_sleep_menu_visible() self.assertEqual(sleep_menu.title, 'Phone') sleep_menu_items = [item.name for item in sleep_menu.menu_items] for item in MENU_ITEMS: self.assertIn(item, sleep_menu_items) sleep_menu.tap_cancel_button() self.assertFalse(sleep_menu.is_menu_visible)
def test_power_button_long_press(self): ' Verify Power Button long press menu\n https://moztrap.mozilla.org/manage/case/1330/\n ' sleep_menu = SleepScreen(self.marionette) self.device.hold_sleep_button() sleep_menu.wait_for_sleep_menu_visible() self.assertEqual(sleep_menu.title, 'Phone') sleep_menu_items = [item.name for item in sleep_menu.menu_items] for item in MENU_ITEMS: self.assertIn(item, sleep_menu_items) sleep_menu.tap_cancel_button() self.assertFalse(sleep_menu.is_menu_visible)<|docstring|>Verify Power Button long press menu https://moztrap.mozilla.org/manage/case/1330/<|endoftext|>
3efcdff092d241d828a59671090e98a34ab42bf9a64a99f5f001701473d2a3df
def test_linear_regression(self): 'LinearRegression predictor can find a linear fit.' numpy.random.seed(0) xs = numpy.linspace(0, 1, 100) ys = ((2 * xs) - 1) noise = numpy.random.normal(size=xs.shape, scale=0.2) xs = xs.reshape(((- 1), 1)) ts = (ys + noise).reshape((1, (- 1), 1)) ids = list(range(100)) with acton.database.ManagedHDF5Database(self.db_path) as db: db.write_features(ids, xs) db.write_labels([0], ids, ts) lr = acton.predictors.PREDICTORS['LinearRegression'](db) lr.fit(ids) (predictions, _variances) = lr.predict(ids) logging.debug('Labels: {}'.format(ys)) logging.debug('Predictions: {}'.format(predictions)) self.assertTrue(numpy.allclose(ys, predictions.ravel(), atol=0.2))
LinearRegression predictor can find a linear fit.
tests/test_regression.py
test_linear_regression
Mengyanz/acton
20
python
def test_linear_regression(self): numpy.random.seed(0) xs = numpy.linspace(0, 1, 100) ys = ((2 * xs) - 1) noise = numpy.random.normal(size=xs.shape, scale=0.2) xs = xs.reshape(((- 1), 1)) ts = (ys + noise).reshape((1, (- 1), 1)) ids = list(range(100)) with acton.database.ManagedHDF5Database(self.db_path) as db: db.write_features(ids, xs) db.write_labels([0], ids, ts) lr = acton.predictors.PREDICTORS['LinearRegression'](db) lr.fit(ids) (predictions, _variances) = lr.predict(ids) logging.debug('Labels: {}'.format(ys)) logging.debug('Predictions: {}'.format(predictions)) self.assertTrue(numpy.allclose(ys, predictions.ravel(), atol=0.2))
def test_linear_regression(self): numpy.random.seed(0) xs = numpy.linspace(0, 1, 100) ys = ((2 * xs) - 1) noise = numpy.random.normal(size=xs.shape, scale=0.2) xs = xs.reshape(((- 1), 1)) ts = (ys + noise).reshape((1, (- 1), 1)) ids = list(range(100)) with acton.database.ManagedHDF5Database(self.db_path) as db: db.write_features(ids, xs) db.write_labels([0], ids, ts) lr = acton.predictors.PREDICTORS['LinearRegression'](db) lr.fit(ids) (predictions, _variances) = lr.predict(ids) logging.debug('Labels: {}'.format(ys)) logging.debug('Predictions: {}'.format(predictions)) self.assertTrue(numpy.allclose(ys, predictions.ravel(), atol=0.2))<|docstring|>LinearRegression predictor can find a linear fit.<|endoftext|>
ea76712667e009537971dc84db5cd1ebda67e03eba19393e11aa09285cf4b1cc
def multiprocess_by_address(addresses, worker_class, output_file_path, output_field_names, num_processes=None, log_every=5): '\n Addresses are put into a read queue.\n Several workers are spawn with one SQLAlchemy session each (should be used for read only queries).\n Each worker takes addresses from the read queue and puts outputs into a write queue (in dictionary format).\n After all the workers are done, the outputs are taken from write the queue and added to the session.\n At the end the session is committed.\n :param addresses: contract address to process\n :param worker_class: the one that actually does the processing\n :param output_file_path:\n :param output_field_names:\n :param num_processes: how many workers should be spawned\n :param log_every: amount of seconds between between count logs\n ' start_time = time.time() if (num_processes is None): num_processes = (cpu_count() - 1) read_queue = Queue() write_queue = Queue() for address in addresses: read_queue.put(address) write_process = Process(target=write_worker, args=(write_queue, output_file_path, output_field_names)) write_process.start() count_process = Process(target=count_worker, args=(read_queue, log_every)) count_process.start() worker_processes = [] for _ in range(num_processes): worker_process = Process(target=worker_wrapper, args=(read_queue, worker_class, write_queue)) worker_process.start() worker_processes.append(worker_process) logger.info('Waiting for the workers...') for worker_process in worker_processes: worker_process.join() logger.info('Workers finished.') write_queue.put({'event_type': EVENT_TYPE_EXIT}) write_process.join() elapsed_time = (time.time() - start_time) elapsed_time_unit = 'seconds' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'minutes' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'hours' if (elapsed_time > 24): elapsed_time /= 24 elapsed_time_unit = 'days' logger.info('Total time: {} {}'.format(elapsed_time, elapsed_time_unit))
Addresses are put into a read queue. Several workers are spawn with one SQLAlchemy session each (should be used for read only queries). Each worker takes addresses from the read queue and puts outputs into a write queue (in dictionary format). After all the workers are done, the outputs are taken from write the queue and added to the session. At the end the session is committed. :param addresses: contract address to process :param worker_class: the one that actually does the processing :param output_file_path: :param output_field_names: :param num_processes: how many workers should be spawned :param log_every: amount of seconds between between count logs
honeypot_detection/multiprocess_by_address.py
multiprocess_by_address
betaswap/honeypot-detection
0
python
def multiprocess_by_address(addresses, worker_class, output_file_path, output_field_names, num_processes=None, log_every=5): '\n Addresses are put into a read queue.\n Several workers are spawn with one SQLAlchemy session each (should be used for read only queries).\n Each worker takes addresses from the read queue and puts outputs into a write queue (in dictionary format).\n After all the workers are done, the outputs are taken from write the queue and added to the session.\n At the end the session is committed.\n :param addresses: contract address to process\n :param worker_class: the one that actually does the processing\n :param output_file_path:\n :param output_field_names:\n :param num_processes: how many workers should be spawned\n :param log_every: amount of seconds between between count logs\n ' start_time = time.time() if (num_processes is None): num_processes = (cpu_count() - 1) read_queue = Queue() write_queue = Queue() for address in addresses: read_queue.put(address) write_process = Process(target=write_worker, args=(write_queue, output_file_path, output_field_names)) write_process.start() count_process = Process(target=count_worker, args=(read_queue, log_every)) count_process.start() worker_processes = [] for _ in range(num_processes): worker_process = Process(target=worker_wrapper, args=(read_queue, worker_class, write_queue)) worker_process.start() worker_processes.append(worker_process) logger.info('Waiting for the workers...') for worker_process in worker_processes: worker_process.join() logger.info('Workers finished.') write_queue.put({'event_type': EVENT_TYPE_EXIT}) write_process.join() elapsed_time = (time.time() - start_time) elapsed_time_unit = 'seconds' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'minutes' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'hours' if (elapsed_time > 24): elapsed_time /= 24 elapsed_time_unit = 'days' logger.info('Total time: {} {}'.format(elapsed_time, elapsed_time_unit))
def multiprocess_by_address(addresses, worker_class, output_file_path, output_field_names, num_processes=None, log_every=5): '\n Addresses are put into a read queue.\n Several workers are spawn with one SQLAlchemy session each (should be used for read only queries).\n Each worker takes addresses from the read queue and puts outputs into a write queue (in dictionary format).\n After all the workers are done, the outputs are taken from write the queue and added to the session.\n At the end the session is committed.\n :param addresses: contract address to process\n :param worker_class: the one that actually does the processing\n :param output_file_path:\n :param output_field_names:\n :param num_processes: how many workers should be spawned\n :param log_every: amount of seconds between between count logs\n ' start_time = time.time() if (num_processes is None): num_processes = (cpu_count() - 1) read_queue = Queue() write_queue = Queue() for address in addresses: read_queue.put(address) write_process = Process(target=write_worker, args=(write_queue, output_file_path, output_field_names)) write_process.start() count_process = Process(target=count_worker, args=(read_queue, log_every)) count_process.start() worker_processes = [] for _ in range(num_processes): worker_process = Process(target=worker_wrapper, args=(read_queue, worker_class, write_queue)) worker_process.start() worker_processes.append(worker_process) logger.info('Waiting for the workers...') for worker_process in worker_processes: worker_process.join() logger.info('Workers finished.') write_queue.put({'event_type': EVENT_TYPE_EXIT}) write_process.join() elapsed_time = (time.time() - start_time) elapsed_time_unit = 'seconds' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'minutes' if (elapsed_time > 60): elapsed_time /= 60 elapsed_time_unit = 'hours' if (elapsed_time > 24): elapsed_time /= 24 elapsed_time_unit = 'days' logger.info('Total time: {} {}'.format(elapsed_time, 
elapsed_time_unit))<|docstring|>Addresses are put into a read queue. Several workers are spawn with one SQLAlchemy session each (should be used for read only queries). Each worker takes addresses from the read queue and puts outputs into a write queue (in dictionary format). After all the workers are done, the outputs are taken from write the queue and added to the session. At the end the session is committed. :param addresses: contract address to process :param worker_class: the one that actually does the processing :param output_file_path: :param output_field_names: :param num_processes: how many workers should be spawned :param log_every: amount of seconds between between count logs<|endoftext|>
a90a62530b8f5df4814856041857a0b7b03a12fae6b7fb808f40540c404de9ed
def __init__(self, sqlalchemy_session, write_queue): '\n :param sqlalchemy_session: to query the database (should be read only queries)\n :param write_queue: should put model instances\n ' self.sqlalchemy_session = sqlalchemy_session self.write_queue = write_queue self.logger = logger
:param sqlalchemy_session: to query the database (should be read only queries) :param write_queue: should put model instances
honeypot_detection/multiprocess_by_address.py
__init__
betaswap/honeypot-detection
0
python
def __init__(self, sqlalchemy_session, write_queue): '\n :param sqlalchemy_session: to query the database (should be read only queries)\n :param write_queue: should put model instances\n ' self.sqlalchemy_session = sqlalchemy_session self.write_queue = write_queue self.logger = logger
def __init__(self, sqlalchemy_session, write_queue): '\n :param sqlalchemy_session: to query the database (should be read only queries)\n :param write_queue: should put model instances\n ' self.sqlalchemy_session = sqlalchemy_session self.write_queue = write_queue self.logger = logger<|docstring|>:param sqlalchemy_session: to query the database (should be read only queries) :param write_queue: should put model instances<|endoftext|>
68c40cf30f2318325fe131effbb1256cd813a40fa2177ef0b1d8085361fcb4b0
def process_address(self, address): '\n Process a contract and write outputs in a queue.\n :param address: contract address to process\n ' raise NotImplementedError
Process a contract and write outputs in a queue. :param address: contract address to process
honeypot_detection/multiprocess_by_address.py
process_address
betaswap/honeypot-detection
0
python
def process_address(self, address): '\n Process a contract and write outputs in a queue.\n :param address: contract address to process\n ' raise NotImplementedError
def process_address(self, address): '\n Process a contract and write outputs in a queue.\n :param address: contract address to process\n ' raise NotImplementedError<|docstring|>Process a contract and write outputs in a queue. :param address: contract address to process<|endoftext|>
dd5ae7ca0d282beb790913093dd5b93c1554d2d74ea89166a6f06da0888e919a
def __getattr__(self, key): 'Return arbitrary attributes.' if key.startswith('os_'): key = key[3:] if (key in [attr.replace('-', '_') for attr in self.config]): return self.config[key] else: return None
Return arbitrary attributes.
os_client_config/cloud_config.py
__getattr__
mail2nsrajesh/os-client-config
0
python
def __getattr__(self, key): if key.startswith('os_'): key = key[3:] if (key in [attr.replace('-', '_') for attr in self.config]): return self.config[key] else: return None
def __getattr__(self, key): if key.startswith('os_'): key = key[3:] if (key in [attr.replace('-', '_') for attr in self.config]): return self.config[key] else: return None<|docstring|>Return arbitrary attributes.<|endoftext|>
3ce9317c8d71b1eff4bc2ca9b564fd574c26c286de0176b3248acada888315db
def set_session_constructor(self, session_constructor): 'Sets the Session constructor.' self._session_constructor = session_constructor
Sets the Session constructor.
os_client_config/cloud_config.py
set_session_constructor
mail2nsrajesh/os-client-config
0
python
def set_session_constructor(self, session_constructor): self._session_constructor = session_constructor
def set_session_constructor(self, session_constructor): self._session_constructor = session_constructor<|docstring|>Sets the Session constructor.<|endoftext|>
f6eac1cca0150cb9ad1ebac76596f52a0c81b34fea1d730c760f21157fa489a0
def get_requests_verify_args(self): 'Return the verify and cert values for the requests library.' if (self.config['verify'] and self.config['cacert']): verify = self.config['cacert'] else: verify = self.config['verify'] if self.config['cacert']: warnings.warn('You are specifying a cacert for the cloud {0} but also to ignore the host verification. The host SSL cert will not be verified.'.format(self.name)) cert = self.config.get('cert', None) if cert: if self.config['key']: cert = (cert, self.config['key']) return (verify, cert)
Return the verify and cert values for the requests library.
os_client_config/cloud_config.py
get_requests_verify_args
mail2nsrajesh/os-client-config
0
python
def get_requests_verify_args(self): if (self.config['verify'] and self.config['cacert']): verify = self.config['cacert'] else: verify = self.config['verify'] if self.config['cacert']: warnings.warn('You are specifying a cacert for the cloud {0} but also to ignore the host verification. The host SSL cert will not be verified.'.format(self.name)) cert = self.config.get('cert', None) if cert: if self.config['key']: cert = (cert, self.config['key']) return (verify, cert)
def get_requests_verify_args(self): if (self.config['verify'] and self.config['cacert']): verify = self.config['cacert'] else: verify = self.config['verify'] if self.config['cacert']: warnings.warn('You are specifying a cacert for the cloud {0} but also to ignore the host verification. The host SSL cert will not be verified.'.format(self.name)) cert = self.config.get('cert', None) if cert: if self.config['key']: cert = (cert, self.config['key']) return (verify, cert)<|docstring|>Return the verify and cert values for the requests library.<|endoftext|>
c83831e13b9e66dedcb6450000f7a89f4fe38b8f238da79331bf4c531b825bce
def get_services(self): 'Return a list of service types we know something about.' services = [] for (key, val) in self.config.items(): if (key.endswith('api_version') or key.endswith('service_type') or key.endswith('service_name')): services.append('_'.join(key.split('_')[:(- 2)])) return list(set(services))
Return a list of service types we know something about.
os_client_config/cloud_config.py
get_services
mail2nsrajesh/os-client-config
0
python
def get_services(self): services = [] for (key, val) in self.config.items(): if (key.endswith('api_version') or key.endswith('service_type') or key.endswith('service_name')): services.append('_'.join(key.split('_')[:(- 2)])) return list(set(services))
def get_services(self): services = [] for (key, val) in self.config.items(): if (key.endswith('api_version') or key.endswith('service_type') or key.endswith('service_name')): services.append('_'.join(key.split('_')[:(- 2)])) return list(set(services))<|docstring|>Return a list of service types we know something about.<|endoftext|>
c4b1794499ca5b3cda66491376b92344d34c01e6938c20ed6a40f6d1e95a668c
def get_auth(self): 'Return a keystoneauth plugin from the auth credentials.' return self._auth
Return a keystoneauth plugin from the auth credentials.
os_client_config/cloud_config.py
get_auth
mail2nsrajesh/os-client-config
0
python
def get_auth(self): return self._auth
def get_auth(self): return self._auth<|docstring|>Return a keystoneauth plugin from the auth credentials.<|endoftext|>
325d230c79ef49c34e95dfa02062266c8b63866decea6a438a1e69ebd3114f7e
def get_session(self): 'Return a keystoneauth session based on the auth credentials.' if (self._keystone_session is None): if (not self._auth): raise exceptions.OpenStackConfigException('Problem with auth parameters') (verify, cert) = self.get_requests_verify_args() if (not verify): self.log.debug('Turning off SSL warnings for {cloud}:{region} since verify=False'.format(cloud=self.name, region=self.region)) requestsexceptions.squelch_warnings(insecure_requests=(not verify)) self._keystone_session = self._session_constructor(auth=self._auth, verify=verify, cert=cert, timeout=self.config['api_timeout']) if hasattr(self._keystone_session, 'additional_user_agent'): self._keystone_session.additional_user_agent.append(('os-client-config', os_client_config.__version__)) if hasattr(self._keystone_session, 'app_name'): self._keystone_session.app_name = self._app_name if hasattr(self._keystone_session, 'app_version'): self._keystone_session.app_version = self._app_version return self._keystone_session
Return a keystoneauth session based on the auth credentials.
os_client_config/cloud_config.py
get_session
mail2nsrajesh/os-client-config
0
python
def get_session(self): if (self._keystone_session is None): if (not self._auth): raise exceptions.OpenStackConfigException('Problem with auth parameters') (verify, cert) = self.get_requests_verify_args() if (not verify): self.log.debug('Turning off SSL warnings for {cloud}:{region} since verify=False'.format(cloud=self.name, region=self.region)) requestsexceptions.squelch_warnings(insecure_requests=(not verify)) self._keystone_session = self._session_constructor(auth=self._auth, verify=verify, cert=cert, timeout=self.config['api_timeout']) if hasattr(self._keystone_session, 'additional_user_agent'): self._keystone_session.additional_user_agent.append(('os-client-config', os_client_config.__version__)) if hasattr(self._keystone_session, 'app_name'): self._keystone_session.app_name = self._app_name if hasattr(self._keystone_session, 'app_version'): self._keystone_session.app_version = self._app_version return self._keystone_session
def get_session(self): if (self._keystone_session is None): if (not self._auth): raise exceptions.OpenStackConfigException('Problem with auth parameters') (verify, cert) = self.get_requests_verify_args() if (not verify): self.log.debug('Turning off SSL warnings for {cloud}:{region} since verify=False'.format(cloud=self.name, region=self.region)) requestsexceptions.squelch_warnings(insecure_requests=(not verify)) self._keystone_session = self._session_constructor(auth=self._auth, verify=verify, cert=cert, timeout=self.config['api_timeout']) if hasattr(self._keystone_session, 'additional_user_agent'): self._keystone_session.additional_user_agent.append(('os-client-config', os_client_config.__version__)) if hasattr(self._keystone_session, 'app_name'): self._keystone_session.app_name = self._app_name if hasattr(self._keystone_session, 'app_version'): self._keystone_session.app_version = self._app_version return self._keystone_session<|docstring|>Return a keystoneauth session based on the auth credentials.<|endoftext|>
cdbac1bd81f5527797a483ad14a7b4aa953db323f783956c8d5dacdda6281789
def get_service_catalog(self): 'Helper method to grab the service catalog.' return self._auth.get_access(self.get_session()).service_catalog
Helper method to grab the service catalog.
os_client_config/cloud_config.py
get_service_catalog
mail2nsrajesh/os-client-config
0
python
def get_service_catalog(self): return self._auth.get_access(self.get_session()).service_catalog
def get_service_catalog(self): return self._auth.get_access(self.get_session()).service_catalog<|docstring|>Helper method to grab the service catalog.<|endoftext|>
8debe562e2def300d2b37cb2a8adcc6e1b6ff8aaf3f61fcde9ec9c3076cb638f
def get_session_client(self, service_key): "Return a prepped requests adapter for a given service.\n\n This is useful for making direct requests calls against a\n 'mounted' endpoint. That is, if you do:\n\n client = get_session_client('compute')\n\n then you can do:\n\n client.get('/flavors')\n\n and it will work like you think.\n " return adapter.Adapter(session=self.get_session(), service_type=self.get_service_type(service_key), service_name=self.get_service_name(service_key), interface=self.get_interface(service_key), region_name=self.region)
Return a prepped requests adapter for a given service. This is useful for making direct requests calls against a 'mounted' endpoint. That is, if you do: client = get_session_client('compute') then you can do: client.get('/flavors') and it will work like you think.
os_client_config/cloud_config.py
get_session_client
mail2nsrajesh/os-client-config
0
python
def get_session_client(self, service_key): "Return a prepped requests adapter for a given service.\n\n This is useful for making direct requests calls against a\n 'mounted' endpoint. That is, if you do:\n\n client = get_session_client('compute')\n\n then you can do:\n\n client.get('/flavors')\n\n and it will work like you think.\n " return adapter.Adapter(session=self.get_session(), service_type=self.get_service_type(service_key), service_name=self.get_service_name(service_key), interface=self.get_interface(service_key), region_name=self.region)
def get_session_client(self, service_key): "Return a prepped requests adapter for a given service.\n\n This is useful for making direct requests calls against a\n 'mounted' endpoint. That is, if you do:\n\n client = get_session_client('compute')\n\n then you can do:\n\n client.get('/flavors')\n\n and it will work like you think.\n " return adapter.Adapter(session=self.get_session(), service_type=self.get_service_type(service_key), service_name=self.get_service_name(service_key), interface=self.get_interface(service_key), region_name=self.region)<|docstring|>Return a prepped requests adapter for a given service. This is useful for making direct requests calls against a 'mounted' endpoint. That is, if you do: client = get_session_client('compute') then you can do: client.get('/flavors') and it will work like you think.<|endoftext|>
a07248d2f358c1084a3cce373c690df2bb8d0413c7cf481d003b69a1934ebfdf
def get_session_endpoint(self, service_key, min_version=None, max_version=None): "Return the endpoint from config or the catalog.\n\n If a configuration lists an explicit endpoint for a service,\n return that. Otherwise, fetch the service catalog from the\n keystone session and return the appropriate endpoint.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n\n " override_endpoint = self.get_endpoint(service_key) if override_endpoint: return override_endpoint endpoint = None kwargs = {'service_name': self.get_service_name(service_key), 'region_name': self.region} kwargs['interface'] = self.get_interface(service_key) if ((service_key == 'volume') and (not self.get_api_version('volume'))): min_version = float((min_version or 1)) max_version = float((max_version or 3)) min_major = math.trunc(float(min_version)) max_major = math.trunc(float(max_version)) versions = range((int(max_major) + 1), int(min_major), (- 1)) service_types = [] for version in versions: if (version == 1): service_types.append('volume') else: service_types.append('volumev{v}'.format(v=version)) else: service_types = [self.get_service_type(service_key)] endpoint = self._get_highest_endpoint(service_types, kwargs) if (not endpoint): self.log.warning('Keystone catalog entry not found (service_type=%s,service_name=%sinterface=%s,region_name=%s)', service_key, kwargs['service_name'], kwargs['interface'], kwargs['region_name']) return endpoint
Return the endpoint from config or the catalog. If a configuration lists an explicit endpoint for a service, return that. Otherwise, fetch the service catalog from the keystone session and return the appropriate endpoint. :param service_key: Generic key for service, such as 'compute' or 'network'
os_client_config/cloud_config.py
get_session_endpoint
mail2nsrajesh/os-client-config
0
python
def get_session_endpoint(self, service_key, min_version=None, max_version=None): "Return the endpoint from config or the catalog.\n\n If a configuration lists an explicit endpoint for a service,\n return that. Otherwise, fetch the service catalog from the\n keystone session and return the appropriate endpoint.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n\n " override_endpoint = self.get_endpoint(service_key) if override_endpoint: return override_endpoint endpoint = None kwargs = {'service_name': self.get_service_name(service_key), 'region_name': self.region} kwargs['interface'] = self.get_interface(service_key) if ((service_key == 'volume') and (not self.get_api_version('volume'))): min_version = float((min_version or 1)) max_version = float((max_version or 3)) min_major = math.trunc(float(min_version)) max_major = math.trunc(float(max_version)) versions = range((int(max_major) + 1), int(min_major), (- 1)) service_types = [] for version in versions: if (version == 1): service_types.append('volume') else: service_types.append('volumev{v}'.format(v=version)) else: service_types = [self.get_service_type(service_key)] endpoint = self._get_highest_endpoint(service_types, kwargs) if (not endpoint): self.log.warning('Keystone catalog entry not found (service_type=%s,service_name=%sinterface=%s,region_name=%s)', service_key, kwargs['service_name'], kwargs['interface'], kwargs['region_name']) return endpoint
def get_session_endpoint(self, service_key, min_version=None, max_version=None): "Return the endpoint from config or the catalog.\n\n If a configuration lists an explicit endpoint for a service,\n return that. Otherwise, fetch the service catalog from the\n keystone session and return the appropriate endpoint.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n\n " override_endpoint = self.get_endpoint(service_key) if override_endpoint: return override_endpoint endpoint = None kwargs = {'service_name': self.get_service_name(service_key), 'region_name': self.region} kwargs['interface'] = self.get_interface(service_key) if ((service_key == 'volume') and (not self.get_api_version('volume'))): min_version = float((min_version or 1)) max_version = float((max_version or 3)) min_major = math.trunc(float(min_version)) max_major = math.trunc(float(max_version)) versions = range((int(max_major) + 1), int(min_major), (- 1)) service_types = [] for version in versions: if (version == 1): service_types.append('volume') else: service_types.append('volumev{v}'.format(v=version)) else: service_types = [self.get_service_type(service_key)] endpoint = self._get_highest_endpoint(service_types, kwargs) if (not endpoint): self.log.warning('Keystone catalog entry not found (service_type=%s,service_name=%sinterface=%s,region_name=%s)', service_key, kwargs['service_name'], kwargs['interface'], kwargs['region_name']) return endpoint<|docstring|>Return the endpoint from config or the catalog. If a configuration lists an explicit endpoint for a service, return that. Otherwise, fetch the service catalog from the keystone session and return the appropriate endpoint. :param service_key: Generic key for service, such as 'compute' or 'network'<|endoftext|>
7e2a7d157776e7195e081ae5ab803c4f4a392979c5200d443ba05b68fe13ad1b
def get_legacy_client(self, service_key, client_class=None, interface_key=None, pass_version_arg=True, version=None, min_version=None, max_version=None, **kwargs): "Return a legacy OpenStack client object for the given config.\n\n Most of the OpenStack python-*client libraries have the same\n interface for their client constructors, but there are several\n parameters one wants to pass given a :class:`CloudConfig` object.\n\n In the future, OpenStack API consumption should be done through\n the OpenStack SDK, but that's not ready yet. This is for getting\n Client objects from python-*client only.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n :param client_class: Class of the client to be instantiated. This\n should be the unversioned version if there\n is one, such as novaclient.client.Client, or\n the versioned one, such as\n neutronclient.v2_0.client.Client if there isn't\n :param interface_key: (optional) Some clients, such as glanceclient\n only accept the parameter 'interface' instead\n of 'endpoint_type' - this is a get-out-of-jail\n parameter for those until they can be aligned.\n os-client-config understands this to be the\n case if service_key is image, so this is really\n only for use with other unknown broken clients.\n :param pass_version_arg: (optional) If a versioned Client constructor\n was passed to client_class, set this to\n False, which will tell get_client to not\n pass a version parameter. 
os-client-config\n already understand that this is the\n case for network, so it can be omitted in\n that case.\n :param version: (optional) Version string to override the configured\n version string.\n :param min_version: (options) Minimum version acceptable.\n :param max_version: (options) Maximum version acceptable.\n :param kwargs: (optional) keyword args are passed through to the\n Client constructor, so this is in case anything\n additional needs to be passed in.\n " if (not client_class): client_class = _get_client(service_key) interface = self.get_interface(service_key) endpoint = self.get_session_endpoint(service_key, min_version=min_version, max_version=max_version) endpoint_override = self.get_endpoint(service_key) if (service_key == 'object-store'): constructor_kwargs = dict(session=self.get_session(), os_options=dict(service_type=self.get_service_type(service_key), object_storage_url=endpoint_override, region_name=self.region)) else: constructor_kwargs = dict(session=self.get_session(), service_name=self.get_service_name(service_key), service_type=self.get_service_type(service_key), endpoint_override=endpoint_override, region_name=self.region) if (service_key == 'image'): from glanceclient.common import utils as glance_utils (endpoint, detected_version) = glance_utils.strip_version(endpoint) if (not version): version = detected_version if (not endpoint_override): constructor_kwargs['endpoint_override'] = endpoint constructor_kwargs.update(kwargs) if (pass_version_arg and (service_key != 'object-store')): if (not version): version = self.get_api_version(service_key) if ((not version) and (service_key == 'volume')): from cinderclient import client as cinder_client version = cinder_client.get_volume_api_from_url(endpoint) if ((service_key == 'network') and (version == '2')): version = '2.0' if (service_key == 'identity'): if ('endpoint' not in constructor_kwargs): endpoint = self.get_session_endpoint('identity') constructor_kwargs['endpoint'] = endpoint 
if (service_key == 'network'): constructor_kwargs['api_version'] = version elif (service_key == 'baremetal'): if (version != '1'): constructor_kwargs['os_ironic_api_version'] = version constructor_kwargs['version'] = version[0] else: constructor_kwargs['version'] = version if (min_version and (min_version > float(version))): raise exceptions.OpenStackConfigVersionException('Minimum version {min_version} requested but {version} found'.format(min_version=min_version, version=version), version=version) if (max_version and (max_version < float(version))): raise exceptions.OpenStackConfigVersionException('Maximum version {max_version} requested but {version} found'.format(max_version=max_version, version=version), version=version) if (service_key == 'database'): constructor_kwargs['username'] = None constructor_kwargs['password'] = None if (not interface_key): if (service_key in ('image', 'key-manager')): interface_key = 'interface' elif ((service_key == 'identity') and version and version.startswith('3')): interface_key = 'interface' else: interface_key = 'endpoint_type' if (service_key == 'object-store'): constructor_kwargs['os_options'][interface_key] = interface else: constructor_kwargs[interface_key] = interface return client_class(**constructor_kwargs)
Return a legacy OpenStack client object for the given config. Most of the OpenStack python-*client libraries have the same interface for their client constructors, but there are several parameters one wants to pass given a :class:`CloudConfig` object. In the future, OpenStack API consumption should be done through the OpenStack SDK, but that's not ready yet. This is for getting Client objects from python-*client only. :param service_key: Generic key for service, such as 'compute' or 'network' :param client_class: Class of the client to be instantiated. This should be the unversioned version if there is one, such as novaclient.client.Client, or the versioned one, such as neutronclient.v2_0.client.Client if there isn't :param interface_key: (optional) Some clients, such as glanceclient only accept the parameter 'interface' instead of 'endpoint_type' - this is a get-out-of-jail parameter for those until they can be aligned. os-client-config understands this to be the case if service_key is image, so this is really only for use with other unknown broken clients. :param pass_version_arg: (optional) If a versioned Client constructor was passed to client_class, set this to False, which will tell get_client to not pass a version parameter. os-client-config already understand that this is the case for network, so it can be omitted in that case. :param version: (optional) Version string to override the configured version string. :param min_version: (options) Minimum version acceptable. :param max_version: (options) Maximum version acceptable. :param kwargs: (optional) keyword args are passed through to the Client constructor, so this is in case anything additional needs to be passed in.
os_client_config/cloud_config.py
get_legacy_client
mail2nsrajesh/os-client-config
0
python
def get_legacy_client(self, service_key, client_class=None, interface_key=None, pass_version_arg=True, version=None, min_version=None, max_version=None, **kwargs): "Return a legacy OpenStack client object for the given config.\n\n Most of the OpenStack python-*client libraries have the same\n interface for their client constructors, but there are several\n parameters one wants to pass given a :class:`CloudConfig` object.\n\n In the future, OpenStack API consumption should be done through\n the OpenStack SDK, but that's not ready yet. This is for getting\n Client objects from python-*client only.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n :param client_class: Class of the client to be instantiated. This\n should be the unversioned version if there\n is one, such as novaclient.client.Client, or\n the versioned one, such as\n neutronclient.v2_0.client.Client if there isn't\n :param interface_key: (optional) Some clients, such as glanceclient\n only accept the parameter 'interface' instead\n of 'endpoint_type' - this is a get-out-of-jail\n parameter for those until they can be aligned.\n os-client-config understands this to be the\n case if service_key is image, so this is really\n only for use with other unknown broken clients.\n :param pass_version_arg: (optional) If a versioned Client constructor\n was passed to client_class, set this to\n False, which will tell get_client to not\n pass a version parameter. 
os-client-config\n already understand that this is the\n case for network, so it can be omitted in\n that case.\n :param version: (optional) Version string to override the configured\n version string.\n :param min_version: (options) Minimum version acceptable.\n :param max_version: (options) Maximum version acceptable.\n :param kwargs: (optional) keyword args are passed through to the\n Client constructor, so this is in case anything\n additional needs to be passed in.\n " if (not client_class): client_class = _get_client(service_key) interface = self.get_interface(service_key) endpoint = self.get_session_endpoint(service_key, min_version=min_version, max_version=max_version) endpoint_override = self.get_endpoint(service_key) if (service_key == 'object-store'): constructor_kwargs = dict(session=self.get_session(), os_options=dict(service_type=self.get_service_type(service_key), object_storage_url=endpoint_override, region_name=self.region)) else: constructor_kwargs = dict(session=self.get_session(), service_name=self.get_service_name(service_key), service_type=self.get_service_type(service_key), endpoint_override=endpoint_override, region_name=self.region) if (service_key == 'image'): from glanceclient.common import utils as glance_utils (endpoint, detected_version) = glance_utils.strip_version(endpoint) if (not version): version = detected_version if (not endpoint_override): constructor_kwargs['endpoint_override'] = endpoint constructor_kwargs.update(kwargs) if (pass_version_arg and (service_key != 'object-store')): if (not version): version = self.get_api_version(service_key) if ((not version) and (service_key == 'volume')): from cinderclient import client as cinder_client version = cinder_client.get_volume_api_from_url(endpoint) if ((service_key == 'network') and (version == '2')): version = '2.0' if (service_key == 'identity'): if ('endpoint' not in constructor_kwargs): endpoint = self.get_session_endpoint('identity') constructor_kwargs['endpoint'] = endpoint 
if (service_key == 'network'): constructor_kwargs['api_version'] = version elif (service_key == 'baremetal'): if (version != '1'): constructor_kwargs['os_ironic_api_version'] = version constructor_kwargs['version'] = version[0] else: constructor_kwargs['version'] = version if (min_version and (min_version > float(version))): raise exceptions.OpenStackConfigVersionException('Minimum version {min_version} requested but {version} found'.format(min_version=min_version, version=version), version=version) if (max_version and (max_version < float(version))): raise exceptions.OpenStackConfigVersionException('Maximum version {max_version} requested but {version} found'.format(max_version=max_version, version=version), version=version) if (service_key == 'database'): constructor_kwargs['username'] = None constructor_kwargs['password'] = None if (not interface_key): if (service_key in ('image', 'key-manager')): interface_key = 'interface' elif ((service_key == 'identity') and version and version.startswith('3')): interface_key = 'interface' else: interface_key = 'endpoint_type' if (service_key == 'object-store'): constructor_kwargs['os_options'][interface_key] = interface else: constructor_kwargs[interface_key] = interface return client_class(**constructor_kwargs)
def get_legacy_client(self, service_key, client_class=None, interface_key=None, pass_version_arg=True, version=None, min_version=None, max_version=None, **kwargs): "Return a legacy OpenStack client object for the given config.\n\n Most of the OpenStack python-*client libraries have the same\n interface for their client constructors, but there are several\n parameters one wants to pass given a :class:`CloudConfig` object.\n\n In the future, OpenStack API consumption should be done through\n the OpenStack SDK, but that's not ready yet. This is for getting\n Client objects from python-*client only.\n\n :param service_key: Generic key for service, such as 'compute' or\n 'network'\n :param client_class: Class of the client to be instantiated. This\n should be the unversioned version if there\n is one, such as novaclient.client.Client, or\n the versioned one, such as\n neutronclient.v2_0.client.Client if there isn't\n :param interface_key: (optional) Some clients, such as glanceclient\n only accept the parameter 'interface' instead\n of 'endpoint_type' - this is a get-out-of-jail\n parameter for those until they can be aligned.\n os-client-config understands this to be the\n case if service_key is image, so this is really\n only for use with other unknown broken clients.\n :param pass_version_arg: (optional) If a versioned Client constructor\n was passed to client_class, set this to\n False, which will tell get_client to not\n pass a version parameter. 
os-client-config\n already understand that this is the\n case for network, so it can be omitted in\n that case.\n :param version: (optional) Version string to override the configured\n version string.\n :param min_version: (options) Minimum version acceptable.\n :param max_version: (options) Maximum version acceptable.\n :param kwargs: (optional) keyword args are passed through to the\n Client constructor, so this is in case anything\n additional needs to be passed in.\n " if (not client_class): client_class = _get_client(service_key) interface = self.get_interface(service_key) endpoint = self.get_session_endpoint(service_key, min_version=min_version, max_version=max_version) endpoint_override = self.get_endpoint(service_key) if (service_key == 'object-store'): constructor_kwargs = dict(session=self.get_session(), os_options=dict(service_type=self.get_service_type(service_key), object_storage_url=endpoint_override, region_name=self.region)) else: constructor_kwargs = dict(session=self.get_session(), service_name=self.get_service_name(service_key), service_type=self.get_service_type(service_key), endpoint_override=endpoint_override, region_name=self.region) if (service_key == 'image'): from glanceclient.common import utils as glance_utils (endpoint, detected_version) = glance_utils.strip_version(endpoint) if (not version): version = detected_version if (not endpoint_override): constructor_kwargs['endpoint_override'] = endpoint constructor_kwargs.update(kwargs) if (pass_version_arg and (service_key != 'object-store')): if (not version): version = self.get_api_version(service_key) if ((not version) and (service_key == 'volume')): from cinderclient import client as cinder_client version = cinder_client.get_volume_api_from_url(endpoint) if ((service_key == 'network') and (version == '2')): version = '2.0' if (service_key == 'identity'): if ('endpoint' not in constructor_kwargs): endpoint = self.get_session_endpoint('identity') constructor_kwargs['endpoint'] = endpoint 
if (service_key == 'network'): constructor_kwargs['api_version'] = version elif (service_key == 'baremetal'): if (version != '1'): constructor_kwargs['os_ironic_api_version'] = version constructor_kwargs['version'] = version[0] else: constructor_kwargs['version'] = version if (min_version and (min_version > float(version))): raise exceptions.OpenStackConfigVersionException('Minimum version {min_version} requested but {version} found'.format(min_version=min_version, version=version), version=version) if (max_version and (max_version < float(version))): raise exceptions.OpenStackConfigVersionException('Maximum version {max_version} requested but {version} found'.format(max_version=max_version, version=version), version=version) if (service_key == 'database'): constructor_kwargs['username'] = None constructor_kwargs['password'] = None if (not interface_key): if (service_key in ('image', 'key-manager')): interface_key = 'interface' elif ((service_key == 'identity') and version and version.startswith('3')): interface_key = 'interface' else: interface_key = 'endpoint_type' if (service_key == 'object-store'): constructor_kwargs['os_options'][interface_key] = interface else: constructor_kwargs[interface_key] = interface return client_class(**constructor_kwargs)<|docstring|>Return a legacy OpenStack client object for the given config. Most of the OpenStack python-*client libraries have the same interface for their client constructors, but there are several parameters one wants to pass given a :class:`CloudConfig` object. In the future, OpenStack API consumption should be done through the OpenStack SDK, but that's not ready yet. This is for getting Client objects from python-*client only. :param service_key: Generic key for service, such as 'compute' or 'network' :param client_class: Class of the client to be instantiated. 
This should be the unversioned version if there is one, such as novaclient.client.Client, or the versioned one, such as neutronclient.v2_0.client.Client if there isn't :param interface_key: (optional) Some clients, such as glanceclient only accept the parameter 'interface' instead of 'endpoint_type' - this is a get-out-of-jail parameter for those until they can be aligned. os-client-config understands this to be the case if service_key is image, so this is really only for use with other unknown broken clients. :param pass_version_arg: (optional) If a versioned Client constructor was passed to client_class, set this to False, which will tell get_client to not pass a version parameter. os-client-config already understand that this is the case for network, so it can be omitted in that case. :param version: (optional) Version string to override the configured version string. :param min_version: (options) Minimum version acceptable. :param max_version: (options) Maximum version acceptable. :param kwargs: (optional) keyword args are passed through to the Client constructor, so this is in case anything additional needs to be passed in.<|endoftext|>
85995cdbfbb1c8675e0819869332a134197583ab1c9448e6ed308765754cd070
def get_cache_resource_expiration(self, resource, default=None): 'Get expiration time for a resource\n\n :param resource: Name of the resource type\n :param default: Default value to return if not found (optional,\n defaults to None)\n\n :returns: Expiration time for the resource type as float or default\n ' if self._openstack_config: expiration = self._openstack_config.get_cache_expiration() if (resource not in expiration): return default return float(expiration[resource])
Get expiration time for a resource :param resource: Name of the resource type :param default: Default value to return if not found (optional, defaults to None) :returns: Expiration time for the resource type as float or default
os_client_config/cloud_config.py
get_cache_resource_expiration
mail2nsrajesh/os-client-config
0
python
def get_cache_resource_expiration(self, resource, default=None): 'Get expiration time for a resource\n\n :param resource: Name of the resource type\n :param default: Default value to return if not found (optional,\n defaults to None)\n\n :returns: Expiration time for the resource type as float or default\n ' if self._openstack_config: expiration = self._openstack_config.get_cache_expiration() if (resource not in expiration): return default return float(expiration[resource])
def get_cache_resource_expiration(self, resource, default=None): 'Get expiration time for a resource\n\n :param resource: Name of the resource type\n :param default: Default value to return if not found (optional,\n defaults to None)\n\n :returns: Expiration time for the resource type as float or default\n ' if self._openstack_config: expiration = self._openstack_config.get_cache_expiration() if (resource not in expiration): return default return float(expiration[resource])<|docstring|>Get expiration time for a resource :param resource: Name of the resource type :param default: Default value to return if not found (optional, defaults to None) :returns: Expiration time for the resource type as float or default<|endoftext|>
da150863414e8bf52ac8d6638cf83f42cf41dfbae5c903ccfadc79fb2483b2df
def requires_floating_ip(self):
    """Return whether or not this cloud requires floating ips.

    :returns: True or False if known, None if discovery is needed.
        If requires_floating_ip is not configured but the cloud is
        known to not provide floating ips, will return False.
    """
    # A floating_ip_source of the literal string 'None' marks a cloud
    # that is known to not provide floating ips at all.
    source = self.config['floating_ip_source']
    return False if source == 'None' else self.config['requires_floating_ip']
Return whether or not this cloud requires floating ips. :returns: True or False if known, None if discovery is needed. If requires_floating_ip is not configured but the cloud is known to not provide floating ips, will return False.
os_client_config/cloud_config.py
requires_floating_ip
mail2nsrajesh/os-client-config
0
python
def requires_floating_ip(self): 'Return whether or not this cloud requires floating ips.\n\n\n :returns: True of False if know, None if discovery is needed.\n If requires_floating_ip is not configured but the cloud is\n known to not provide floating ips, will return False.\n ' if (self.config['floating_ip_source'] == 'None'): return False return self.config['requires_floating_ip']
def requires_floating_ip(self): 'Return whether or not this cloud requires floating ips.\n\n\n :returns: True of False if know, None if discovery is needed.\n If requires_floating_ip is not configured but the cloud is\n known to not provide floating ips, will return False.\n ' if (self.config['floating_ip_source'] == 'None'): return False return self.config['requires_floating_ip']<|docstring|>Return whether or not this cloud requires floating ips. :returns: True of False if know, None if discovery is needed. If requires_floating_ip is not configured but the cloud is known to not provide floating ips, will return False.<|endoftext|>
699684527c1fba0045e6bfaec0c5b4a7f0bb923e427649106ca99d26c2e4cffc
def get_external_networks(self):
    """Get list of network names for external networks."""
    names = []
    for network in self.config['networks']:
        if network['routes_externally']:
            names.append(network['name'])
    return names
Get list of network names for external networks.
os_client_config/cloud_config.py
get_external_networks
mail2nsrajesh/os-client-config
0
python
def get_external_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_externally']]
def get_external_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_externally']]<|docstring|>Get list of network names for external networks.<|endoftext|>
77c7fdd58a6f380c1b8b4b0134a889b84bfbcac279600b436d387c78bc97fb4d
def get_external_ipv4_networks(self):
    """Get list of network names for external IPv4 networks."""
    external = (n for n in self.config['networks']
                if n['routes_ipv4_externally'])
    return [n['name'] for n in external]
Get list of network names for external IPv4 networks.
os_client_config/cloud_config.py
get_external_ipv4_networks
mail2nsrajesh/os-client-config
0
python
def get_external_ipv4_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_ipv4_externally']]
def get_external_ipv4_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_ipv4_externally']]<|docstring|>Get list of network names for external IPv4 networks.<|endoftext|>
b71b4a7a0d1932304604e018ac1875e1eefcd42780bfa85859a8e00bbe594e1f
def get_external_ipv6_networks(self):
    """Get list of network names for external IPv6 networks."""
    results = []
    for entry in self.config['networks']:
        if not entry['routes_ipv6_externally']:
            continue
        results.append(entry['name'])
    return results
Get list of network names for external IPv6 networks.
os_client_config/cloud_config.py
get_external_ipv6_networks
mail2nsrajesh/os-client-config
0
python
def get_external_ipv6_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_ipv6_externally']]
def get_external_ipv6_networks(self): return [net['name'] for net in self.config['networks'] if net['routes_ipv6_externally']]<|docstring|>Get list of network names for external IPv6 networks.<|endoftext|>
fe8996ac7b8f119974c88c24ff586f6ecf6458b9fe3a243459433fbbfe0fb1fd
def get_internal_networks(self):
    """Get list of network names for internal networks."""
    # Internal means: does not route externally.
    internal = []
    for network in self.config['networks']:
        if not network['routes_externally']:
            internal.append(network['name'])
    return internal
Get list of network names for internal networks.
os_client_config/cloud_config.py
get_internal_networks
mail2nsrajesh/os-client-config
0
python
def get_internal_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_externally'])]
def get_internal_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_externally'])]<|docstring|>Get list of network names for internal networks.<|endoftext|>
baacfeb503b368282037c7896da8a7a8ae054ff9510f75532233ee97d422b6f6
def get_internal_ipv4_networks(self):
    """Get list of network names for internal IPv4 networks."""
    non_external = (n for n in self.config['networks']
                    if not n['routes_ipv4_externally'])
    return [n['name'] for n in non_external]
Get list of network names for internal IPv4 networks.
os_client_config/cloud_config.py
get_internal_ipv4_networks
mail2nsrajesh/os-client-config
0
python
def get_internal_ipv4_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_ipv4_externally'])]
def get_internal_ipv4_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_ipv4_externally'])]<|docstring|>Get list of network names for internal IPv4 networks.<|endoftext|>
52e11200dcb61d0c83fd8d50af6bdc03cc7855aa657fa098aa6ebfb15494cfc1
def get_internal_ipv6_networks(self):
    """Get list of network names for internal IPv6 networks."""
    collected = []
    for entry in self.config['networks']:
        if entry['routes_ipv6_externally']:
            continue
        collected.append(entry['name'])
    return collected
Get list of network names for internal IPv6 networks.
os_client_config/cloud_config.py
get_internal_ipv6_networks
mail2nsrajesh/os-client-config
0
python
def get_internal_ipv6_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_ipv6_externally'])]
def get_internal_ipv6_networks(self): return [net['name'] for net in self.config['networks'] if (not net['routes_ipv6_externally'])]<|docstring|>Get list of network names for internal IPv6 networks.<|endoftext|>
12064d8b1051f6b8b0110e6cded1498913d02b62578f4d6cf1d8cfe1db26e220
def get_default_network(self):
    """Get network used for default interactions."""
    # First network flagged as the default interface wins; None when
    # no network carries the flag.
    return next(
        (net['name'] for net in self.config['networks']
         if net['default_interface']),
        None)
Get network used for default interactions.
os_client_config/cloud_config.py
get_default_network
mail2nsrajesh/os-client-config
0
python
def get_default_network(self): for net in self.config['networks']: if net['default_interface']: return net['name'] return None
def get_default_network(self): for net in self.config['networks']: if net['default_interface']: return net['name'] return None<|docstring|>Get network used for default interactions.<|endoftext|>
40beddd2e4dfc12deaba21881ba51711de842098a02091aa584b70c727827c02
def get_nat_destination(self):
    """Get network used for NAT destination."""
    # First network flagged as the NAT destination wins; None when no
    # network carries the flag.
    return next(
        (net['name'] for net in self.config['networks']
         if net['nat_destination']),
        None)
Get network used for NAT destination.
os_client_config/cloud_config.py
get_nat_destination
mail2nsrajesh/os-client-config
0
python
def get_nat_destination(self): for net in self.config['networks']: if net['nat_destination']: return net['name'] return None
def get_nat_destination(self): for net in self.config['networks']: if net['nat_destination']: return net['name'] return None<|docstring|>Get network used for NAT destination.<|endoftext|>
15354e2a7e63af0406d92351f464fdea60adf2ca251e037d246ba1b0ffadbd9a
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def _load_forward():
    """Load forward models."""
    # MEG-only, free-orientation forward as the common starting point.
    free = mne.read_forward_solution(fname_fwd)
    free = mne.pick_types_forward(free, meg=True, eeg=False)
    free = mne.convert_forward_solution(free, surf_ori=False)
    # Derive surface-oriented and fixed-orientation variants from it.
    surf = mne.convert_forward_solution(free, surf_ori=True, use_cps=False)
    fixed = mne.convert_forward_solution(free, force_fixed=True,
                                         use_cps=False)
    # The volume source space forward is read separately.
    vol = mne.read_forward_solution(fname_fwd_vol)
    return free, surf, fixed, vol
Load forward models.
mne/beamformer/tests/test_dics.py
_load_forward
mehdikuchi/mne-python
3
python
@pytest.fixture(scope='module', params=[testing._pytest_param()]) def _load_forward(): fwd_free = mne.read_forward_solution(fname_fwd) fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False) fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False) fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True, use_cps=False) fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True, use_cps=False) fwd_vol = mne.read_forward_solution(fname_fwd_vol) return (fwd_free, fwd_surf, fwd_fixed, fwd_vol)
@pytest.fixture(scope='module', params=[testing._pytest_param()]) def _load_forward(): fwd_free = mne.read_forward_solution(fname_fwd) fwd_free = mne.pick_types_forward(fwd_free, meg=True, eeg=False) fwd_free = mne.convert_forward_solution(fwd_free, surf_ori=False) fwd_surf = mne.convert_forward_solution(fwd_free, surf_ori=True, use_cps=False) fwd_fixed = mne.convert_forward_solution(fwd_free, force_fixed=True, use_cps=False) fwd_vol = mne.read_forward_solution(fname_fwd_vol) return (fwd_free, fwd_surf, fwd_fixed, fwd_vol)<|docstring|>Load forward models.<|endoftext|>
c70251fd6a5e1fd0812db72d3a23e97a2d1855ebbd5f7f95f7fa9825dc851fd2
def _simulate_data(fwd, idx):
    """Simulate an oscillator on the cortex.

    Places a 20 Hz sinusoid (with an amplitude step halfway through) on
    the source at index ``idx`` of the first source space, projects it
    through ``fwd`` with additive sensor noise, and returns the derived
    epochs, evoked, CSD and source bookkeeping.

    NOTE(review): dataset flattening had mangled the extended subscript
    into ``signal[(np.newaxis, :)]``, which is a SyntaxError (a slice
    cannot appear inside a parenthesized tuple); restored valid slicing.
    """
    source_vertno = fwd['src'][0]['vertno'][idx]
    sfreq = 50.0  # Hz
    times = np.arange(10 * sfreq) / sfreq  # 10 seconds of data
    signal = np.sin(20 * 2 * np.pi * times)  # 20 Hz oscillator
    signal[:len(times) // 2] *= 2  # Make signal louder at the beginning
    signal *= 1e-09  # Scale to be in the ballpark of MEG data

    # Construct a SourceEstimate object that describes the signal at the
    # cortical level.
    stc = mne.SourceEstimate(signal[np.newaxis, :],
                             vertices=[[source_vertno], []],
                             tmin=0, tstep=1 / sfreq, subject='sample')

    # Create an info object that holds information about the sensors
    info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad')
    info.update(fwd['info'])  # Merge in sensor position information
    # Pick some channels to speed things up
    info = mne.pick_info(info, np.arange(info['nchan'])[::5])

    # Run the simulated signal through the forward model, obtaining
    # simulated sensor data.
    fwd = mne.pick_channels_forward(fwd, info['ch_names'])
    raw = mne.apply_forward_raw(fwd, stc, info)

    # Add a little noise
    random = np.random.RandomState(42)
    noise = random.randn(*raw._data.shape) * 1e-14
    raw._data += noise

    # Create epochs around the whole recording
    epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0,
                        tmax=raw.times[-1], baseline=(0.0, 0.0),
                        preload=True)
    evoked = epochs.average()

    # Compute the cross-spectral density matrix
    csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10],
                     decim=10)

    # Find the label containing the simulated source
    labels = mne.read_labels_from_annot('sample', hemi='lh',
                                        subjects_dir=subjects_dir)
    label = [label for label in labels
             if np.in1d(source_vertno, label.vertices)[0]]
    assert len(label) == 1
    label = label[0]
    vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno'])
    source_ind = vertices.tolist().index(source_vertno)
    assert vertices[source_ind] == source_vertno
    return epochs, evoked, csd, source_vertno, label, vertices, source_ind
Simulate an oscillator on the cortex.
mne/beamformer/tests/test_dics.py
_simulate_data
mehdikuchi/mne-python
3
python
def _simulate_data(fwd, idx): source_vertno = fwd['src'][0]['vertno'][idx] sfreq = 50.0 times = (np.arange((10 * sfreq)) / sfreq) signal = np.sin((((20 * 2) * np.pi) * times)) signal[:(len(times) // 2)] *= 2 signal *= 1e-09 stc = mne.SourceEstimate(signal[(np.newaxis, :)], vertices=[[source_vertno], []], tmin=0, tstep=(1 / sfreq), subject='sample') info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad') info.update(fwd['info']) info = mne.pick_info(info, np.arange(info['nchan'])[::5]) fwd = mne.pick_channels_forward(fwd, info['ch_names']) raw = mne.apply_forward_raw(fwd, stc, info) random = np.random.RandomState(42) noise = (random.randn(*raw._data.shape) * 1e-14) raw._data += noise epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0, tmax=raw.times[(- 1)], baseline=(0.0, 0.0), preload=True) evoked = epochs.average() csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10) labels = mne.read_labels_from_annot('sample', hemi='lh', subjects_dir=subjects_dir) label = [label for label in labels if np.in1d(source_vertno, label.vertices)[0]] assert (len(label) == 1) label = label[0] vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno']) source_ind = vertices.tolist().index(source_vertno) assert (vertices[source_ind] == source_vertno) return (epochs, evoked, csd, source_vertno, label, vertices, source_ind)
def _simulate_data(fwd, idx): source_vertno = fwd['src'][0]['vertno'][idx] sfreq = 50.0 times = (np.arange((10 * sfreq)) / sfreq) signal = np.sin((((20 * 2) * np.pi) * times)) signal[:(len(times) // 2)] *= 2 signal *= 1e-09 stc = mne.SourceEstimate(signal[(np.newaxis, :)], vertices=[[source_vertno], []], tmin=0, tstep=(1 / sfreq), subject='sample') info = mne.create_info(fwd['info']['ch_names'], sfreq, ch_types='grad') info.update(fwd['info']) info = mne.pick_info(info, np.arange(info['nchan'])[::5]) fwd = mne.pick_channels_forward(fwd, info['ch_names']) raw = mne.apply_forward_raw(fwd, stc, info) random = np.random.RandomState(42) noise = (random.randn(*raw._data.shape) * 1e-14) raw._data += noise epochs = mne.Epochs(raw, [[0, 0, 1]], event_id=1, tmin=0, tmax=raw.times[(- 1)], baseline=(0.0, 0.0), preload=True) evoked = epochs.average() csd = csd_morlet(epochs, frequencies=[10, 20], n_cycles=[5, 10], decim=10) labels = mne.read_labels_from_annot('sample', hemi='lh', subjects_dir=subjects_dir) label = [label for label in labels if np.in1d(source_vertno, label.vertices)[0]] assert (len(label) == 1) label = label[0] vertices = np.intersect1d(label.vertices, fwd['src'][0]['vertno']) source_ind = vertices.tolist().index(source_vertno) assert (vertices[source_ind] == source_vertno) return (epochs, evoked, csd, source_vertno, label, vertices, source_ind)<|docstring|>Simulate an oscillator on the cortex.<|endoftext|>
40b1effa3a4906ea0b081b4a87659653a6031ad7d86898796a98397caa6b5847
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_h5py
@idx_param
@pytest.mark.parametrize('whiten', (False, True))
def test_make_dics(tmpdir, _load_forward, idx, whiten):
    """Test making DICS beamformer filters.

    NOTE(review): dataset flattening had mangled extended subscripts
    into forms like ``scales[(:, np.newaxis)]`` and
    ``csd_noise._data[(:, :)]`` — both SyntaxErrors (slices cannot
    appear inside parenthesized tuples); restored valid slicing.
    Block boundaries of the ``if not whiten:`` section were
    reconstructed from the flattened text — TODO confirm against
    upstream mne.
    """
    fwd_free, fwd_surf, fwd_fixed, fwd_vol = _load_forward
    epochs, _, csd, _, label, vertices, source_ind = \
        _simulate_data(fwd_fixed, idx)
    with pytest.raises(ValueError, match='several sensor types'):
        make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None)
    if whiten:
        # Build a random, Hermitian noise CSD scaled like the ad-hoc cov.
        rng = np.random.RandomState(0)
        scales = mne.make_ad_hoc_cov(epochs.info).data
        n = scales.size
        data = rng.randn(n, n) + 1j * rng.randn(n, n)
        data = data @ data.conj().T  # positive semi-definite
        data *= scales
        data *= scales[:, np.newaxis]
        data.flat[::n + 1] = scales  # real diagonal
        noise_csd = CrossSpectralDensity(_sym_mat_to_vector(data),
                                         epochs.ch_names, 0.0, csd.n_fft)
    else:
        noise_csd = None
    epochs.pick_types(meg='grad')

    # Test invalid parameter combinations raise the expected errors.
    with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori='notexistent',
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='rank, if str'):
        make_dics(epochs.info, fwd_fixed, csd, rank='foo',
                  noise_csd=noise_csd)
    with pytest.raises(TypeError, match='rank must be'):
        make_dics(epochs.info, fwd_fixed, csd, rank=1.0, noise_csd=noise_csd)
    with pytest.raises(ValueError, match='forward operator with free ori'):
        make_dics(epochs.info, fwd_fixed, csd, pick_ori='normal',
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_free, csd, pick_ori='normal',
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='oriented in surface coordinates'):
        make_dics(epochs.info, fwd_vol, csd, pick_ori='normal',
                  noise_csd=noise_csd)
    with pytest.raises(ValueError, match='reduce_rank cannot be used with'):
        make_dics(epochs.info, fwd_free, csd, inversion='single',
                  reduce_rank=True, noise_csd=noise_csd)

    # Sanity checks on the filters: compare filter weights to weights
    # computed by hand.
    n_freq = len(csd.frequencies)
    vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno'])
    n_verts = len(vertices)
    n_orient = 3
    n_channels = len(epochs.ch_names)

    # Test return values
    weight_norm = 'unit-noise-gain'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori=None, weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    assert filters['weights'].shape == (n_freq, n_verts * n_orient,
                                        n_channels)
    assert np.iscomplexobj(filters['weights'])
    assert filters['csd'].ch_names == epochs.ch_names
    assert isinstance(filters['csd'], CrossSpectralDensity)
    assert filters['ch_names'] == epochs.ch_names
    assert_array_equal(filters['proj'], np.eye(n_channels))
    assert_array_equal(filters['vertices'][0], vertices)
    assert_array_equal(filters['vertices'][1], [])
    assert filters['subject'] == fwd_free['src']._subject
    assert filters['pick_ori'] is None
    assert filters['is_free_ori']
    assert filters['inversion'] == inversion
    assert filters['weight_norm'] == weight_norm
    assert 'DICS' in repr(filters)
    assert 'subject "sample"' in repr(filters)
    assert str(len(vertices)) in repr(filters)
    assert str(n_channels) in repr(filters)
    assert 'rank' not in repr(filters)

    # Compare the leadfield to the one computed by hand and verify the
    # weight normalization against it.
    _, noise_cov = _prepare_noise_csd(csd, noise_csd, real_filter=False)
    _, _, _, _, G, _, _, _ = _prepare_beamformer_input(
        epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None,
        noise_cov=noise_cov)
    G.shape = (n_channels, n_verts, n_orient)
    G = G.transpose(1, 2, 0).conj()  # conj() for Hermitian inner products
    _assert_weight_norm(filters, G)

    inversion = 'matrix'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori=None, weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)

    weight_norm = 'unit-noise-gain-invariant'
    inversion = 'single'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori=None, weight_norm=weight_norm, depth=None,
                        noise_csd=noise_csd, inversion=inversion)
    _assert_weight_norm(filters, G)

    # Test picking orientations. Also test weight norming under these
    # different conditions.
    weight_norm = 'unit-noise-gain'
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient,
                                        n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)

    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='max-power', weight_norm=weight_norm,
                        depth=None, noise_csd=noise_csd, inversion=inversion)
    n_orient = 1
    assert filters['weights'].shape == (n_freq, n_verts * n_orient,
                                        n_channels)
    assert not filters['is_free_ori']
    _assert_weight_norm(filters, G)

    # From here on, use a single frequency and a real-valued filter.
    csd = csd[0]
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        pick_ori='normal', real_filter=True,
                        noise_csd=noise_csd)
    assert not np.iscomplexobj(filters['weights'])

    if not whiten:
        # Test filter with forward normalization instead of weight
        # normalization: a unit-noise CSD should then yield unit weights.
        csd_noise = csd.copy()
        inds = np.triu_indices(csd.n_channels)
        csd_noise._data[:, :] = np.eye(csd.n_channels)[inds][:, np.newaxis]
        filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label,
                            weight_norm=None, depth=1.0, noise_csd=noise_csd,
                            inversion='single')
        w = filters['weights'][0][:3]
        assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-06,
                        atol=0)

    # Test turning off both forward and weight normalization
    filters = make_dics(epochs.info, fwd_surf, csd, label=label,
                        weight_norm=None, depth=None, noise_csd=noise_csd)
    w = filters['weights'][0][:3]
    assert not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0,
                           rtol=0.01, atol=0)

    # Test neural-activity-index weight normalization: should be a scalar
    # multiple of the unit-noise-gain weights.
    filters_nai = make_dics(epochs.info, fwd_surf, csd, label=label,
                            pick_ori='max-power', weight_norm='nai',
                            depth=None, noise_csd=noise_csd)
    w_nai = filters_nai['weights'][0]
    filters_ung = make_dics(epochs.info, fwd_surf, csd, label=label,
                            pick_ori='max-power',
                            weight_norm='unit-noise-gain', depth=None,
                            noise_csd=noise_csd)
    w_ung = filters_ung['weights'][0]
    assert_allclose(np.corrcoef(np.abs(w_nai).ravel(),
                                np.abs(w_ung).ravel()), 1, atol=1e-07)

    # Test whether spatial filter contains src_type
    assert 'src_type' in filters

    # Test I/O round-trip
    fname = op.join(str(tmpdir), 'filters-dics.h5')
    filters.save(fname)
    filters_read = read_beamformer(fname)
    assert isinstance(filters, Beamformer)
    assert isinstance(filters_read, Beamformer)
    for key in ['tmin', 'tmax']:  # deal with strictness of object_diff
        setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key)))
    assert object_diff(filters, filters_read) == ''
Test making DICS beamformer filters.
mne/beamformer/tests/test_dics.py
test_make_dics
mehdikuchi/mne-python
3
python
@pytest.mark.slowtest @testing.requires_testing_data @requires_h5py @idx_param @pytest.mark.parametrize('whiten', (False, True)) def test_make_dics(tmpdir, _load_forward, idx, whiten): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, _, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: rng = np.random.RandomState(0) scales = mne.make_ad_hoc_cov(epochs.info).data n = scales.size data = (rng.randn(n, n) + (1j * rng.randn(n, n))) data = (data @ data.conj().T) data *= scales data *= scales[(:, np.newaxis)] data.flat[::(n + 1)] = scales noise_csd = CrossSpectralDensity(_sym_mat_to_vector(data), epochs.ch_names, 0.0, csd.n_fft) else: noise_csd = None epochs.pick_types(meg='grad') with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"): make_dics(epochs.info, fwd_fixed, csd, pick_ori='notexistent', noise_csd=noise_csd) with pytest.raises(ValueError, match='rank, if str'): make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd) with pytest.raises(TypeError, match='rank must be'): make_dics(epochs.info, fwd_fixed, csd, rank=1.0, noise_csd=noise_csd) with pytest.raises(ValueError, match='forward operator with free ori'): make_dics(epochs.info, fwd_fixed, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='oriented in surface coordinates'): make_dics(epochs.info, fwd_free, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='oriented in surface coordinates'): make_dics(epochs.info, fwd_vol, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='reduce_rank cannot be used with'): make_dics(epochs.info, fwd_free, csd, inversion='single', reduce_rank=True, noise_csd=noise_csd) n_freq = len(csd.frequencies) vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno']) n_verts = 
len(vertices) n_orient = 3 n_channels = len(epochs.ch_names) weight_norm = 'unit-noise-gain' inversion = 'single' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert np.iscomplexobj(filters['weights']) assert (filters['csd'].ch_names == epochs.ch_names) assert isinstance(filters['csd'], CrossSpectralDensity) assert (filters['ch_names'] == epochs.ch_names) assert_array_equal(filters['proj'], np.eye(n_channels)) assert_array_equal(filters['vertices'][0], vertices) assert_array_equal(filters['vertices'][1], []) assert (filters['subject'] == fwd_free['src']._subject) assert (filters['pick_ori'] is None) assert filters['is_free_ori'] assert (filters['inversion'] == inversion) assert (filters['weight_norm'] == weight_norm) assert ('DICS' in repr(filters)) assert ('subject "sample"' in repr(filters)) assert (str(len(vertices)) in repr(filters)) assert (str(n_channels) in repr(filters)) assert ('rank' not in repr(filters)) (_, noise_cov) = _prepare_noise_csd(csd, noise_csd, real_filter=False) (_, _, _, _, G, _, _, _) = _prepare_beamformer_input(epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None, noise_cov=noise_cov) G.shape = (n_channels, n_verts, n_orient) G = G.transpose(1, 2, 0).conj() _assert_weight_norm(filters, G) inversion = 'matrix' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) _assert_weight_norm(filters, G) weight_norm = 'unit-noise-gain-invariant' inversion = 'single' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) _assert_weight_norm(filters, G) weight_norm = 'unit-noise-gain' filters = make_dics(epochs.info, fwd_surf, csd, label=label, 
pick_ori='normal', weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) n_orient = 1 assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert (not filters['is_free_ori']) _assert_weight_norm(filters, G) filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) n_orient = 1 assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert (not filters['is_free_ori']) _assert_weight_norm(filters, G) csd = csd[0] filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='normal', real_filter=True, noise_csd=noise_csd) assert (not np.iscomplexobj(filters['weights'])) if (not whiten): csd_noise = csd.copy() inds = np.triu_indices(csd.n_channels) csd_noise._data[(:, :)] = np.eye(csd.n_channels)[inds][(:, np.newaxis)] filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label, weight_norm=None, depth=1.0, noise_csd=noise_csd, inversion='single') w = filters['weights'][0][:3] assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-06, atol=0) filters = make_dics(epochs.info, fwd_surf, csd, label=label, weight_norm=None, depth=None, noise_csd=noise_csd) w = filters['weights'][0][:3] assert (not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=0.01, atol=0)) filters_nai = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm='nai', depth=None, noise_csd=noise_csd) w_nai = filters_nai['weights'][0] filters_ung = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd) w_ung = filters_ung['weights'][0] assert_allclose(np.corrcoef(np.abs(w_nai).ravel(), np.abs(w_ung).ravel()), 1, atol=1e-07) assert ('src_type' in filters) fname = op.join(str(tmpdir), 'filters-dics.h5') filters.save(fname) filters_read = read_beamformer(fname) assert isinstance(filters, 
Beamformer) assert isinstance(filters_read, Beamformer) for key in ['tmin', 'tmax']: setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) assert (object_diff(filters, filters_read) == )
@pytest.mark.slowtest @testing.requires_testing_data @requires_h5py @idx_param @pytest.mark.parametrize('whiten', (False, True)) def test_make_dics(tmpdir, _load_forward, idx, whiten): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, _, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None) if whiten: rng = np.random.RandomState(0) scales = mne.make_ad_hoc_cov(epochs.info).data n = scales.size data = (rng.randn(n, n) + (1j * rng.randn(n, n))) data = (data @ data.conj().T) data *= scales data *= scales[(:, np.newaxis)] data.flat[::(n + 1)] = scales noise_csd = CrossSpectralDensity(_sym_mat_to_vector(data), epochs.ch_names, 0.0, csd.n_fft) else: noise_csd = None epochs.pick_types(meg='grad') with pytest.raises(ValueError, match="Invalid value for the 'pick_ori'"): make_dics(epochs.info, fwd_fixed, csd, pick_ori='notexistent', noise_csd=noise_csd) with pytest.raises(ValueError, match='rank, if str'): make_dics(epochs.info, fwd_fixed, csd, rank='foo', noise_csd=noise_csd) with pytest.raises(TypeError, match='rank must be'): make_dics(epochs.info, fwd_fixed, csd, rank=1.0, noise_csd=noise_csd) with pytest.raises(ValueError, match='forward operator with free ori'): make_dics(epochs.info, fwd_fixed, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='oriented in surface coordinates'): make_dics(epochs.info, fwd_free, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='oriented in surface coordinates'): make_dics(epochs.info, fwd_vol, csd, pick_ori='normal', noise_csd=noise_csd) with pytest.raises(ValueError, match='reduce_rank cannot be used with'): make_dics(epochs.info, fwd_free, csd, inversion='single', reduce_rank=True, noise_csd=noise_csd) n_freq = len(csd.frequencies) vertices = np.intersect1d(label.vertices, fwd_free['src'][0]['vertno']) n_verts = 
len(vertices) n_orient = 3 n_channels = len(epochs.ch_names) weight_norm = 'unit-noise-gain' inversion = 'single' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert np.iscomplexobj(filters['weights']) assert (filters['csd'].ch_names == epochs.ch_names) assert isinstance(filters['csd'], CrossSpectralDensity) assert (filters['ch_names'] == epochs.ch_names) assert_array_equal(filters['proj'], np.eye(n_channels)) assert_array_equal(filters['vertices'][0], vertices) assert_array_equal(filters['vertices'][1], []) assert (filters['subject'] == fwd_free['src']._subject) assert (filters['pick_ori'] is None) assert filters['is_free_ori'] assert (filters['inversion'] == inversion) assert (filters['weight_norm'] == weight_norm) assert ('DICS' in repr(filters)) assert ('subject "sample"' in repr(filters)) assert (str(len(vertices)) in repr(filters)) assert (str(n_channels) in repr(filters)) assert ('rank' not in repr(filters)) (_, noise_cov) = _prepare_noise_csd(csd, noise_csd, real_filter=False) (_, _, _, _, G, _, _, _) = _prepare_beamformer_input(epochs.info, fwd_surf, label, 'vector', combine_xyz=False, exp=None, noise_cov=noise_cov) G.shape = (n_channels, n_verts, n_orient) G = G.transpose(1, 2, 0).conj() _assert_weight_norm(filters, G) inversion = 'matrix' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) _assert_weight_norm(filters, G) weight_norm = 'unit-noise-gain-invariant' inversion = 'single' filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori=None, weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) _assert_weight_norm(filters, G) weight_norm = 'unit-noise-gain' filters = make_dics(epochs.info, fwd_surf, csd, label=label, 
pick_ori='normal', weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) n_orient = 1 assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert (not filters['is_free_ori']) _assert_weight_norm(filters, G) filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm=weight_norm, depth=None, noise_csd=noise_csd, inversion=inversion) n_orient = 1 assert (filters['weights'].shape == (n_freq, (n_verts * n_orient), n_channels)) assert (not filters['is_free_ori']) _assert_weight_norm(filters, G) csd = csd[0] filters = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='normal', real_filter=True, noise_csd=noise_csd) assert (not np.iscomplexobj(filters['weights'])) if (not whiten): csd_noise = csd.copy() inds = np.triu_indices(csd.n_channels) csd_noise._data[(:, :)] = np.eye(csd.n_channels)[inds][(:, np.newaxis)] filters = make_dics(epochs.info, fwd_surf, csd_noise, label=label, weight_norm=None, depth=1.0, noise_csd=noise_csd, inversion='single') w = filters['weights'][0][:3] assert_allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=1e-06, atol=0) filters = make_dics(epochs.info, fwd_surf, csd, label=label, weight_norm=None, depth=None, noise_csd=noise_csd) w = filters['weights'][0][:3] assert (not np.allclose(np.diag(w.dot(w.conjugate().T)), 1.0, rtol=0.01, atol=0)) filters_nai = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm='nai', depth=None, noise_csd=noise_csd) w_nai = filters_nai['weights'][0] filters_ung = make_dics(epochs.info, fwd_surf, csd, label=label, pick_ori='max-power', weight_norm='unit-noise-gain', depth=None, noise_csd=noise_csd) w_ung = filters_ung['weights'][0] assert_allclose(np.corrcoef(np.abs(w_nai).ravel(), np.abs(w_ung).ravel()), 1, atol=1e-07) assert ('src_type' in filters) fname = op.join(str(tmpdir), 'filters-dics.h5') filters.save(fname) filters_read = read_beamformer(fname) assert isinstance(filters, 
Beamformer) assert isinstance(filters_read, Beamformer) for key in ['tmin', 'tmax']: setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) assert (object_diff(filters, filters_read) == )<|docstring|>Test making DICS beamformer filters.<|endoftext|>
00fd9ec9de1825b5f196c7df20de8dda5539c17519b691cd05d7ae0a57800348
@idx_param @pytest.mark.parametrize('inversion, weight_norm', [('single', None), ('matrix', 'unit-noise-gain')]) def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm): 'Test applying a DICS beamformer to a CSD matrix.' (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_free, csd) epochs.pick_types(meg='grad') assert (label.hemi == 'lh') for fwd in [fwd_free, fwd_surf, fwd_fixed]: filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg, inversion=inversion, weight_norm=weight_norm) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_free, vertices, source_ind) assert (dist == 0.0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])
Test applying a DICS beamformer to a CSD matrix.
mne/beamformer/tests/test_dics.py
test_apply_dics_csd
mehdikuchi/mne-python
3
python
@idx_param @pytest.mark.parametrize('inversion, weight_norm', [('single', None), ('matrix', 'unit-noise-gain')]) def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm): (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_free, csd) epochs.pick_types(meg='grad') assert (label.hemi == 'lh') for fwd in [fwd_free, fwd_surf, fwd_fixed]: filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg, inversion=inversion, weight_norm=weight_norm) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_free, vertices, source_ind) assert (dist == 0.0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])
@idx_param @pytest.mark.parametrize('inversion, weight_norm', [('single', None), ('matrix', 'unit-noise-gain')]) def test_apply_dics_csd(_load_forward, idx, inversion, weight_norm): (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 with pytest.raises(ValueError, match='several sensor types'): make_dics(epochs.info, fwd_free, csd) epochs.pick_types(meg='grad') assert (label.hemi == 'lh') for fwd in [fwd_free, fwd_surf, fwd_fixed]: filters = make_dics(epochs.info, fwd, csd, label=label, reg=reg, inversion=inversion, weight_norm=weight_norm) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_free, vertices, source_ind) assert (dist == 0.0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])<|docstring|>Test applying a DICS beamformer to a CSD matrix.<|endoftext|>
ab0bee1ef9df0702a3758f19d576bec583663c683bad5a41686d40d82a28f9a2
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power']) @pytest.mark.parametrize('inversion', ['single', 'matrix']) @idx_param def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): 'Test picking different orientations and inversion modes.' (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg_ = (5 if (inversion == 'matrix') else 1) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, depth=None, weight_norm='unit-noise-gain') (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist <= (0.02 if (inversion == 'matrix') else 0.0)) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) csd_noise = csd.copy() inds = np.triu_indices(csd.n_channels) csd_noise._data[...] = np.eye(csd.n_channels)[inds][(:, np.newaxis)] (noise_power, f) = apply_dics_csd(csd_noise, filters) want_norm = (3 if (pick_ori is None) else 1.0) assert_allclose(noise_power.data, want_norm, atol=1e-07) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, weight_norm=None, depth=1.0) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) mat_tol = {0: 0.055, 100: 0.2, 200: 0.015, 233: 0.035}[idx] max_ = (mat_tol if (inversion == 'matrix') else 0.0) assert (0 <= dist <= max_) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])
Test picking different orientations and inversion modes.
mne/beamformer/tests/test_dics.py
test_apply_dics_ori_inv
mehdikuchi/mne-python
3
python
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power']) @pytest.mark.parametrize('inversion', ['single', 'matrix']) @idx_param def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg_ = (5 if (inversion == 'matrix') else 1) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, depth=None, weight_norm='unit-noise-gain') (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist <= (0.02 if (inversion == 'matrix') else 0.0)) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) csd_noise = csd.copy() inds = np.triu_indices(csd.n_channels) csd_noise._data[...] = np.eye(csd.n_channels)[inds][(:, np.newaxis)] (noise_power, f) = apply_dics_csd(csd_noise, filters) want_norm = (3 if (pick_ori is None) else 1.0) assert_allclose(noise_power.data, want_norm, atol=1e-07) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, weight_norm=None, depth=1.0) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) mat_tol = {0: 0.055, 100: 0.2, 200: 0.015, 233: 0.035}[idx] max_ = (mat_tol if (inversion == 'matrix') else 0.0) assert (0 <= dist <= max_) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])
@pytest.mark.parametrize('pick_ori', [None, 'normal', 'max-power']) @pytest.mark.parametrize('inversion', ['single', 'matrix']) @idx_param def test_apply_dics_ori_inv(_load_forward, pick_ori, inversion, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg_ = (5 if (inversion == 'matrix') else 1) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, depth=None, weight_norm='unit-noise-gain') (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist <= (0.02 if (inversion == 'matrix') else 0.0)) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) csd_noise = csd.copy() inds = np.triu_indices(csd.n_channels) csd_noise._data[...] = np.eye(csd.n_channels)[inds][(:, np.newaxis)] (noise_power, f) = apply_dics_csd(csd_noise, filters) want_norm = (3 if (pick_ori is None) else 1.0) assert_allclose(noise_power.data, want_norm, atol=1e-07) filters = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg_, pick_ori=pick_ori, inversion=inversion, weight_norm=None, depth=1.0) (power, f) = apply_dics_csd(csd, filters) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) mat_tol = {0: 0.055, 100: 0.2, 200: 0.015, 233: 0.035}[idx] max_ = (mat_tol if (inversion == 'matrix') else 0.0) assert (0 <= dist <= max_) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)])<|docstring|>Test picking different orientations and inversion modes.<|endoftext|>
01c46a3e90d2d4acb40501b6c9521fbc1872d10d229dba075590c19fd402a48e
@idx_param def test_real(_load_forward, idx): 'Test using a real-valued filter.' (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg = 1 filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg, real_filter=True, inversion='single') with pytest.warns(None) as w: (power, f) = apply_dics_csd(csd, filters_real) assert (len(w) == 0) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5, pick_ori='max-power', inversion='matrix', reduce_rank=True) (power, f) = apply_dics_csd(csd, filters_real) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, inversion='single') (power, f) = apply_dics_csd(csd, filters_vol) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind) vol_tols = {100: 0.008, 200: 0.008} assert (dist <= vol_tols.get(idx, 0.0)) assert (power.data[(vol_source_ind, 1)] > power.data[(vol_source_ind, 0)]) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='spatial filter does not contain src_type'): apply_dics_csd(csd, filters_vol)
Test using a real-valued filter.
mne/beamformer/tests/test_dics.py
test_real
mehdikuchi/mne-python
3
python
@idx_param def test_real(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg = 1 filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg, real_filter=True, inversion='single') with pytest.warns(None) as w: (power, f) = apply_dics_csd(csd, filters_real) assert (len(w) == 0) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5, pick_ori='max-power', inversion='matrix', reduce_rank=True) (power, f) = apply_dics_csd(csd, filters_real) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, inversion='single') (power, f) = apply_dics_csd(csd, filters_vol) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind) vol_tols = {100: 0.008, 200: 0.008} assert (dist <= vol_tols.get(idx, 0.0)) assert (power.data[(vol_source_ind, 1)] > power.data[(vol_source_ind, 0)]) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='spatial filter does not contain src_type'): apply_dics_csd(csd, filters_vol)
@idx_param def test_real(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, _, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) epochs.pick_types(meg='grad') reg = 1 filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=reg, real_filter=True, inversion='single') with pytest.warns(None) as w: (power, f) = apply_dics_csd(csd, filters_real) assert (len(w) == 0) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_real = make_dics(epochs.info, fwd_surf, csd, label=label, reg=5, pick_ori='max-power', inversion='matrix', reduce_rank=True) (power, f) = apply_dics_csd(csd, filters_real) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_surf, vertices, source_ind) assert (dist == 0) assert (power.data[(source_ind, 1)] > power.data[(source_ind, 0)]) filters_vol = make_dics(epochs.info, fwd_vol, csd, reg=reg, inversion='single') (power, f) = apply_dics_csd(csd, filters_vol) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) assert (f == [10, 20]) dist = _fwd_dist(power, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind) vol_tols = {100: 0.008, 200: 0.008} assert (dist <= vol_tols.get(idx, 0.0)) assert (power.data[(vol_source_ind, 1)] > power.data[(vol_source_ind, 0)]) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='spatial filter does not contain src_type'): apply_dics_csd(csd, filters_vol)<|docstring|>Test using a real-valued filter.<|endoftext|>
6d673fc01347dd871820e7bdabfe7a58f67c33e05eb76326fbb7dad00b54d786
@pytest.mark.filterwarnings('ignore:The use of several sensor types with the:RuntimeWarning') @idx_param def test_apply_dics_timeseries(_load_forward, idx): 'Test DICS applied to timeseries data.' (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, evoked, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 5 with pytest.raises(ValueError, match='several sensor types'): make_dics(evoked.info, fwd_surf, csd) evoked.pick_types(meg='grad') multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, reg=reg) stcs = apply_dics(evoked, multiple_filters) assert isinstance(stcs, list) assert (len(stcs) == len(multiple_filters['weights'])) assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1]) assert_allclose(stcs[0].times, evoked.times) with pytest.raises(ValueError, match='computed for a single frequency'): apply_dics_epochs(epochs, multiple_filters) csd20 = csd.pick_frequency(20) filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg, inversion='single') with pytest.warns(None) as w: stcs = apply_dics_epochs(epochs, filters) assert (len(w) == 0) assert isinstance(stcs, list) assert (len(stcs) == 1) assert_array_equal(stcs[0].vertices[0], filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], filters['vertices'][1]) assert_allclose(stcs[0].times, epochs.times) stc = (stcs[0] ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) stc = apply_dics(evoked, filters) stc = (stc ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) evoked_ch = cp.deepcopy(evoked) evoked_ch.pick_channels(evoked_ch.ch_names[:(- 1)]) with pytest.raises(ValueError, match='MEG 2633 which is not present'): apply_dics(evoked_ch, filters) filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label) stc_noproj = apply_dics(evoked, 
filters_noproj) evoked_proj = evoked.copy() p = compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0) proj_matrix = make_projector(p, evoked_proj.ch_names)[0] evoked_proj.info['projs'] += p filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label) assert_array_equal(filters_proj['proj'], proj_matrix) stc_proj = apply_dics(evoked_proj, filters_proj) assert np.any(np.not_equal(stc_noproj.data, stc_proj.data)) filters_proj['proj'] = filters_proj['proj'][(:(- 1), :(- 1))] with pytest.raises(ValueError, match='operands could not be broadcast'): apply_dics(evoked_proj, filters_proj) stcs = apply_dics_epochs(epochs, filters, return_generator=False) stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True) assert_array_equal(stcs[0].data, next(stcs_gen).data) filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, inversion='single') stc = apply_dics(evoked, filters_vol) stc = (stc ** 2).mean() assert (stc.data.shape[1] == 1) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind, tidx=0) vol_tols = {100: 0.008, 200: 0.015} vol_tol = vol_tols.get(idx, 0.0) assert (dist <= vol_tol) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'): apply_dics_epochs(epochs, filters_vol)
Test DICS applied to timeseries data.
mne/beamformer/tests/test_dics.py
test_apply_dics_timeseries
mehdikuchi/mne-python
3
python
@pytest.mark.filterwarnings('ignore:The use of several sensor types with the:RuntimeWarning') @idx_param def test_apply_dics_timeseries(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, evoked, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 5 with pytest.raises(ValueError, match='several sensor types'): make_dics(evoked.info, fwd_surf, csd) evoked.pick_types(meg='grad') multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, reg=reg) stcs = apply_dics(evoked, multiple_filters) assert isinstance(stcs, list) assert (len(stcs) == len(multiple_filters['weights'])) assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1]) assert_allclose(stcs[0].times, evoked.times) with pytest.raises(ValueError, match='computed for a single frequency'): apply_dics_epochs(epochs, multiple_filters) csd20 = csd.pick_frequency(20) filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg, inversion='single') with pytest.warns(None) as w: stcs = apply_dics_epochs(epochs, filters) assert (len(w) == 0) assert isinstance(stcs, list) assert (len(stcs) == 1) assert_array_equal(stcs[0].vertices[0], filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], filters['vertices'][1]) assert_allclose(stcs[0].times, epochs.times) stc = (stcs[0] ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) stc = apply_dics(evoked, filters) stc = (stc ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) evoked_ch = cp.deepcopy(evoked) evoked_ch.pick_channels(evoked_ch.ch_names[:(- 1)]) with pytest.raises(ValueError, match='MEG 2633 which is not present'): apply_dics(evoked_ch, filters) filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label) stc_noproj = apply_dics(evoked, filters_noproj) evoked_proj = evoked.copy() p = 
compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0) proj_matrix = make_projector(p, evoked_proj.ch_names)[0] evoked_proj.info['projs'] += p filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label) assert_array_equal(filters_proj['proj'], proj_matrix) stc_proj = apply_dics(evoked_proj, filters_proj) assert np.any(np.not_equal(stc_noproj.data, stc_proj.data)) filters_proj['proj'] = filters_proj['proj'][(:(- 1), :(- 1))] with pytest.raises(ValueError, match='operands could not be broadcast'): apply_dics(evoked_proj, filters_proj) stcs = apply_dics_epochs(epochs, filters, return_generator=False) stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True) assert_array_equal(stcs[0].data, next(stcs_gen).data) filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, inversion='single') stc = apply_dics(evoked, filters_vol) stc = (stc ** 2).mean() assert (stc.data.shape[1] == 1) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind, tidx=0) vol_tols = {100: 0.008, 200: 0.015} vol_tol = vol_tols.get(idx, 0.0) assert (dist <= vol_tol) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'): apply_dics_epochs(epochs, filters_vol)
@pytest.mark.filterwarnings('ignore:The use of several sensor types with the:RuntimeWarning') @idx_param def test_apply_dics_timeseries(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, fwd_vol) = _load_forward (epochs, evoked, csd, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 5 with pytest.raises(ValueError, match='several sensor types'): make_dics(evoked.info, fwd_surf, csd) evoked.pick_types(meg='grad') multiple_filters = make_dics(evoked.info, fwd_surf, csd, label=label, reg=reg) stcs = apply_dics(evoked, multiple_filters) assert isinstance(stcs, list) assert (len(stcs) == len(multiple_filters['weights'])) assert_array_equal(stcs[0].vertices[0], multiple_filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], multiple_filters['vertices'][1]) assert_allclose(stcs[0].times, evoked.times) with pytest.raises(ValueError, match='computed for a single frequency'): apply_dics_epochs(epochs, multiple_filters) csd20 = csd.pick_frequency(20) filters = make_dics(evoked.info, fwd_surf, csd20, label=label, reg=reg, inversion='single') with pytest.warns(None) as w: stcs = apply_dics_epochs(epochs, filters) assert (len(w) == 0) assert isinstance(stcs, list) assert (len(stcs) == 1) assert_array_equal(stcs[0].vertices[0], filters['vertices'][0]) assert_array_equal(stcs[0].vertices[1], filters['vertices'][1]) assert_allclose(stcs[0].times, epochs.times) stc = (stcs[0] ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) stc = apply_dics(evoked, filters) stc = (stc ** 2).mean() dist = _fwd_dist(stc, fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) evoked_ch = cp.deepcopy(evoked) evoked_ch.pick_channels(evoked_ch.ch_names[:(- 1)]) with pytest.raises(ValueError, match='MEG 2633 which is not present'): apply_dics(evoked_ch, filters) filters_noproj = make_dics(evoked.info, fwd_surf, csd20, label=label) stc_noproj = apply_dics(evoked, filters_noproj) evoked_proj = evoked.copy() p = 
compute_proj_evoked(evoked_proj, n_grad=1, n_mag=0, n_eeg=0) proj_matrix = make_projector(p, evoked_proj.ch_names)[0] evoked_proj.info['projs'] += p filters_proj = make_dics(evoked_proj.info, fwd_surf, csd20, label=label) assert_array_equal(filters_proj['proj'], proj_matrix) stc_proj = apply_dics(evoked_proj, filters_proj) assert np.any(np.not_equal(stc_noproj.data, stc_proj.data)) filters_proj['proj'] = filters_proj['proj'][(:(- 1), :(- 1))] with pytest.raises(ValueError, match='operands could not be broadcast'): apply_dics(evoked_proj, filters_proj) stcs = apply_dics_epochs(epochs, filters, return_generator=False) stcs_gen = apply_dics_epochs(epochs, filters, return_generator=True) assert_array_equal(stcs[0].data, next(stcs_gen).data) filters_vol = make_dics(evoked.info, fwd_vol, csd20, reg=reg, inversion='single') stc = apply_dics(evoked, filters_vol) stc = (stc ** 2).mean() assert (stc.data.shape[1] == 1) vol_source_ind = _nearest_vol_ind(fwd_vol, fwd_surf, vertices, source_ind) dist = _fwd_dist(stc, fwd_vol, fwd_vol['src'][0]['vertno'], vol_source_ind, tidx=0) vol_tols = {100: 0.008, 200: 0.015} vol_tol = vol_tols.get(idx, 0.0) assert (dist <= vol_tol) del filters_vol['src_type'] with pytest.warns(RuntimeWarning, match='filter does not contain src_typ'): apply_dics_epochs(epochs, filters_vol)<|docstring|>Test DICS applied to timeseries data.<|endoftext|>
5085ab3baaf98013cd5937537766728a8f749967db4c046b24e6b8093032ada9
@pytest.mark.slowtest @testing.requires_testing_data @idx_param def test_tf_dics(_load_forward, idx): 'Test 5D time-frequency beamforming based on DICS.' (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, _, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 tmin = 0 tmax = 9 tstep = 4 win_lengths = [5, 5] frequencies = [10, 20] freq_bins = [(8, 12), (18, 22)] with pytest.raises(ValueError, match='several sensor types'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) epochs.pick_types(meg='grad') for mode in ['fourier', 'multitaper', 'cwt_morlet']: stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode=mode, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=1) assert (dist == 0) assert (stcs[1].data[(source_ind, 0)] > stcs[1].data[(source_ind, 1)]) assert (stcs[1].data[(source_ind, 0)] > stcs[0].data[(source_ind, 0)]) source_power = [] time_windows = [(0, 5), (4, 9)] for time_window in time_windows: csd = csd_morlet(epochs, frequencies=[frequencies[1]], tmin=time_window[0], tmax=time_window[1], decim=10) csd = csd.sum() csd._data /= csd.n_fft filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label, inversion='single') (stc_source_power, _) = apply_dics_csd(csd, filters) source_power.append(stc_source_power.data) assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0) stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') noise_csd = csd.copy() inds = np.triu_indices(csd.n_channels) noise_csd._data[(:, :)] = (2 * np.eye(csd.n_channels)[inds][(:, np.newaxis)]) 
noise_csd.n_fft = 2 noise_csds = [noise_csd, noise_csd] stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') assert_allclose((3 * stcs_norm[0].data), stcs[0].data, atol=0) assert_allclose((3 * stcs_norm[1].data), stcs[1].data, atol=0) with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='fourier', freq_bins=None) with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=None) with pytest.raises(ValueError, match='One noise CSD object expected per'): tf_dics(epochs, fwd_surf, [noise_csds[0]], tmin, tmax, tstep, win_lengths, freq_bins=freq_bins) with pytest.raises(ValueError, match='One time window length expected'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins) with pytest.raises(ValueError, match='Time step should not be larger'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins) with pytest.raises(ValueError, match='When specifying number of FFT'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, n_ffts=[1]) with pytest.raises(ValueError, match='When using multitaper mode and'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper', mt_bandwidths=[20]) assert (len(epochs) == 1), len(epochs) with np.errstate(invalid='ignore'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, subtract_evoked=True, reg=reg, label=label, decim=20) assert np.all(np.isnan(stcs[0].data))
Test 5D time-frequency beamforming based on DICS.
mne/beamformer/tests/test_dics.py
test_tf_dics
mehdikuchi/mne-python
3
python
@pytest.mark.slowtest @testing.requires_testing_data @idx_param def test_tf_dics(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, _, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 tmin = 0 tmax = 9 tstep = 4 win_lengths = [5, 5] frequencies = [10, 20] freq_bins = [(8, 12), (18, 22)] with pytest.raises(ValueError, match='several sensor types'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) epochs.pick_types(meg='grad') for mode in ['fourier', 'multitaper', 'cwt_morlet']: stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode=mode, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=1) assert (dist == 0) assert (stcs[1].data[(source_ind, 0)] > stcs[1].data[(source_ind, 1)]) assert (stcs[1].data[(source_ind, 0)] > stcs[0].data[(source_ind, 0)]) source_power = [] time_windows = [(0, 5), (4, 9)] for time_window in time_windows: csd = csd_morlet(epochs, frequencies=[frequencies[1]], tmin=time_window[0], tmax=time_window[1], decim=10) csd = csd.sum() csd._data /= csd.n_fft filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label, inversion='single') (stc_source_power, _) = apply_dics_csd(csd, filters) source_power.append(stc_source_power.data) assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0) stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') noise_csd = csd.copy() inds = np.triu_indices(csd.n_channels) noise_csd._data[(:, :)] = (2 * np.eye(csd.n_channels)[inds][(:, np.newaxis)]) noise_csd.n_fft = 2 noise_csds = [noise_csd, noise_csd] 
stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') assert_allclose((3 * stcs_norm[0].data), stcs[0].data, atol=0) assert_allclose((3 * stcs_norm[1].data), stcs[1].data, atol=0) with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='fourier', freq_bins=None) with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=None) with pytest.raises(ValueError, match='One noise CSD object expected per'): tf_dics(epochs, fwd_surf, [noise_csds[0]], tmin, tmax, tstep, win_lengths, freq_bins=freq_bins) with pytest.raises(ValueError, match='One time window length expected'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins) with pytest.raises(ValueError, match='Time step should not be larger'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins) with pytest.raises(ValueError, match='When specifying number of FFT'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, n_ffts=[1]) with pytest.raises(ValueError, match='When using multitaper mode and'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper', mt_bandwidths=[20]) assert (len(epochs) == 1), len(epochs) with np.errstate(invalid='ignore'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, subtract_evoked=True, reg=reg, label=label, decim=20) assert np.all(np.isnan(stcs[0].data))
@pytest.mark.slowtest @testing.requires_testing_data @idx_param def test_tf_dics(_load_forward, idx): (fwd_free, fwd_surf, fwd_fixed, _) = _load_forward (epochs, _, _, source_vertno, label, vertices, source_ind) = _simulate_data(fwd_fixed, idx) reg = 1 tmin = 0 tmax = 9 tstep = 4 win_lengths = [5, 5] frequencies = [10, 20] freq_bins = [(8, 12), (18, 22)] with pytest.raises(ValueError, match='several sensor types'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) epochs.pick_types(meg='grad') for mode in ['fourier', 'multitaper', 'cwt_morlet']: stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode=mode, freq_bins=freq_bins, frequencies=frequencies, decim=10, reg=reg, label=label) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=0) assert (dist == 0) dist = _fwd_dist(stcs[1], fwd_surf, vertices, source_ind, tidx=1) assert (dist == 0) assert (stcs[1].data[(source_ind, 0)] > stcs[1].data[(source_ind, 1)]) assert (stcs[1].data[(source_ind, 0)] > stcs[0].data[(source_ind, 0)]) source_power = [] time_windows = [(0, 5), (4, 9)] for time_window in time_windows: csd = csd_morlet(epochs, frequencies=[frequencies[1]], tmin=time_window[0], tmax=time_window[1], decim=10) csd = csd.sum() csd._data /= csd.n_fft filters = make_dics(epochs.info, fwd_surf, csd, reg=reg, label=label, inversion='single') (stc_source_power, _) = apply_dics_csd(csd, filters) source_power.append(stc_source_power.data) assert_allclose(stcs[1].data, np.array(source_power).squeeze().T, atol=0) stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') noise_csd = csd.copy() inds = np.triu_indices(csd.n_channels) noise_csd._data[(:, :)] = (2 * np.eye(csd.n_channels)[inds][(:, np.newaxis)]) noise_csd.n_fft = 2 noise_csds = [noise_csd, noise_csd] 
stcs_norm = tf_dics(epochs, fwd_surf, noise_csds, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, decim=10, reg=reg, label=label, depth=None, weight_norm='unit-noise-gain') assert_allclose((3 * stcs_norm[0].data), stcs[0].data, atol=0) assert_allclose((3 * stcs_norm[1].data), stcs[1].data, atol=0) with pytest.raises(ValueError, match='fourier.*freq_bins" parameter'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='fourier', freq_bins=None) with pytest.raises(ValueError, match='cwt_morlet.*frequencies" param'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=None) with pytest.raises(ValueError, match='One noise CSD object expected per'): tf_dics(epochs, fwd_surf, [noise_csds[0]], tmin, tmax, tstep, win_lengths, freq_bins=freq_bins) with pytest.raises(ValueError, match='One time window length expected'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=[0, 1, 2], freq_bins=freq_bins) with pytest.raises(ValueError, match='Time step should not be larger'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep=0.15, win_lengths=[0.2, 0.1], freq_bins=freq_bins) with pytest.raises(ValueError, match='When specifying number of FFT'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, freq_bins=freq_bins, n_ffts=[1]) with pytest.raises(ValueError, match='When using multitaper mode and'): tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths=win_lengths, freq_bins=freq_bins, mode='multitaper', mt_bandwidths=[20]) assert (len(epochs) == 1), len(epochs) with np.errstate(invalid='ignore'): stcs = tf_dics(epochs, fwd_surf, None, tmin, tmax, tstep, win_lengths, mode='cwt_morlet', frequencies=frequencies, subtract_evoked=True, reg=reg, label=label, decim=20) assert np.all(np.isnan(stcs[0].data))<|docstring|>Test 5D time-frequency beamforming based on DICS.<|endoftext|>
d634065f943b520a49848342671b4a2f5e64c49edb6f2c8f8557351a669ab91f
def __process_collection(self):
    """Create, broadcast, and confirm the fee-collection tx on Ethereum,
    then report the outcome to Discord for monitoring.

    Any error along the way is logged and forwarded to Discord instead of
    being raised to the caller.
    """
    tx_type = 'ibBTC Fee Collection'
    try:
        tx_hash = self.__send_collection_tx()
        succeeded, _ = confirm_transaction(self.web3, tx_hash)
        if not succeeded:
            # Not confirmed: still report the broadcast (without a gas cost)
            # unless the hash is the 0x00 sentinel for a failed send.
            if tx_hash != HexBytes(0):
                send_success_to_discord(tx_hash=tx_hash, tx_type=tx_type)
            return
        gas_price_of_tx = get_gas_price_of_tx(
            self.web3, self.eth_usd_oracle, tx_hash, Network.Ethereum
        )
        send_success_to_discord(
            tx_hash=tx_hash, tx_type=tx_type, gas_cost=gas_price_of_tx
        )
    except Exception as e:
        self.logger.error(f'Error processing collection tx: {e}')
        send_oracle_error_to_discord(tx_type=tx_type, error=e)
Private function to create, broadcast, confirm tx on eth and then send transaction to Discord for monitoring
src/ibbtc_fee_collector.py
__process_collection
Badger-Finance/python-keepers
0
python
def __process_collection(self): 'Private function to create, broadcast, confirm tx on eth and then send\n transaction to Discord for monitoring\n ' try: tx_hash = self.__send_collection_tx() (succeeded, _) = confirm_transaction(self.web3, tx_hash) if succeeded: gas_price_of_tx = get_gas_price_of_tx(self.web3, self.eth_usd_oracle, tx_hash, Network.Ethereum) send_success_to_discord(tx_hash=tx_hash, tx_type='ibBTC Fee Collection', gas_cost=gas_price_of_tx) elif (tx_hash != HexBytes(0)): send_success_to_discord(tx_hash=tx_hash, tx_type='ibBTC Fee Collection') except Exception as e: self.logger.error(f'Error processing collection tx: {e}') send_oracle_error_to_discord(tx_type='ibBTC Fee Collection', error=e)
def __process_collection(self): 'Private function to create, broadcast, confirm tx on eth and then send\n transaction to Discord for monitoring\n ' try: tx_hash = self.__send_collection_tx() (succeeded, _) = confirm_transaction(self.web3, tx_hash) if succeeded: gas_price_of_tx = get_gas_price_of_tx(self.web3, self.eth_usd_oracle, tx_hash, Network.Ethereum) send_success_to_discord(tx_hash=tx_hash, tx_type='ibBTC Fee Collection', gas_cost=gas_price_of_tx) elif (tx_hash != HexBytes(0)): send_success_to_discord(tx_hash=tx_hash, tx_type='ibBTC Fee Collection') except Exception as e: self.logger.error(f'Error processing collection tx: {e}') send_oracle_error_to_discord(tx_type='ibBTC Fee Collection', error=e)<|docstring|>Private function to create, broadcast, confirm tx on eth and then send transaction to Discord for monitoring<|endoftext|>
5fd18038f33104a0a02dd33fd05a9c32dc61121ffd263cc43a5fa1721003c7b0
def __send_collection_tx(self) -> HexBytes:
    """Sends transaction to ETH node for confirmation.

    Builds an EIP-1559 `collectFee` transaction, signs it with the keeper
    key, and broadcasts the raw transaction.

    Raises:
        Exception: If we have an issue sending transaction (unable to
            communicate with node, etc.) we log the error and return a
            tx_hash of 0x00.

    Returns:
        HexBytes: Transaction hash for transaction that was sent.
    """
    # EIP-1559 fee fields: priority fee + max fee looked up at send time.
    options = {
        'nonce': self.web3.eth.get_transaction_count(self.keeper_address),
        'from': self.keeper_address,
        'gas': GAS_LIMITS[Network.Ethereum],
        'maxPriorityFeePerGas': get_priority_fee(self.web3),
        'maxFeePerGas': get_effective_gas_price(self.web3),
    }
    # Sentinel returned when signing/broadcast never yields a real hash.
    tx_hash = HexBytes(0)
    try:
        tx = self.ibbtc.functions.collectFee().buildTransaction(options)
        signed_tx = self.web3.eth.account.sign_transaction(
            tx, private_key=self.keeper_key
        )
        # Capture the hash before broadcasting so we can still report it if
        # send_raw_transaction raises after the node has seen the tx.
        tx_hash = signed_tx.hash
        self.web3.eth.send_raw_transaction(signed_tx.rawTransaction)
    except ValueError as e:
        # web3 raises ValueError for node-side rejections (e.g. nonce too
        # low, already known); try to recover the real hash from the error.
        self.logger.error(f'Error in sending collection tx: {e}')
        tx_hash = get_hash_from_failed_tx_error(
            e, self.logger, keeper_address=self.keeper_address
        )
    finally:
        # NOTE(review): `return` inside `finally` swallows ANY in-flight
        # exception (not just the ValueError handled above), so non-ValueError
        # failures silently return the current tx_hash — confirm intentional.
        return tx_hash
Sends transaction to ETH node for confirmation. Raises: Exception: If we have an issue sending transaction (unable to communicate with node, etc.) we log the error and return a tx_hash of 0x00. Returns: HexBytes: Transaction hash for transaction that was sent.
src/ibbtc_fee_collector.py
__send_collection_tx
Badger-Finance/python-keepers
0
python
def __send_collection_tx(self) -> HexBytes: 'Sends transaction to ETH node for confirmation.\n\n Raises:\n Exception: If we have an issue sending transaction (unable to communicate with\n node, etc.) we log the error and return a tx_hash of 0x00.\n\n Returns:\n HexBytes: Transaction hash for transaction that was sent.\n ' options = {'nonce': self.web3.eth.get_transaction_count(self.keeper_address), 'from': self.keeper_address, 'gas': GAS_LIMITS[Network.Ethereum], 'maxPriorityFeePerGas': get_priority_fee(self.web3), 'maxFeePerGas': get_effective_gas_price(self.web3)} tx_hash = HexBytes(0) try: tx = self.ibbtc.functions.collectFee().buildTransaction(options) signed_tx = self.web3.eth.account.sign_transaction(tx, private_key=self.keeper_key) tx_hash = signed_tx.hash self.web3.eth.send_raw_transaction(signed_tx.rawTransaction) except ValueError as e: self.logger.error(f'Error in sending collection tx: {e}') tx_hash = get_hash_from_failed_tx_error(e, self.logger, keeper_address=self.keeper_address) finally: return tx_hash
def __send_collection_tx(self) -> HexBytes: 'Sends transaction to ETH node for confirmation.\n\n Raises:\n Exception: If we have an issue sending transaction (unable to communicate with\n node, etc.) we log the error and return a tx_hash of 0x00.\n\n Returns:\n HexBytes: Transaction hash for transaction that was sent.\n ' options = {'nonce': self.web3.eth.get_transaction_count(self.keeper_address), 'from': self.keeper_address, 'gas': GAS_LIMITS[Network.Ethereum], 'maxPriorityFeePerGas': get_priority_fee(self.web3), 'maxFeePerGas': get_effective_gas_price(self.web3)} tx_hash = HexBytes(0) try: tx = self.ibbtc.functions.collectFee().buildTransaction(options) signed_tx = self.web3.eth.account.sign_transaction(tx, private_key=self.keeper_key) tx_hash = signed_tx.hash self.web3.eth.send_raw_transaction(signed_tx.rawTransaction) except ValueError as e: self.logger.error(f'Error in sending collection tx: {e}') tx_hash = get_hash_from_failed_tx_error(e, self.logger, keeper_address=self.keeper_address) finally: return tx_hash<|docstring|>Sends transaction to ETH node for confirmation. Raises: Exception: If we have an issue sending transaction (unable to communicate with node, etc.) we log the error and return a tx_hash of 0x00. Returns: HexBytes: Transaction hash for transaction that was sent.<|endoftext|>
d4941d3f9a19e36753e36015644e0bdd55deb931ccd12611f88fa75dbf8f19f5
def addANDChained(ret_dict, circ0, idx0, circ1, idx1):
    """Add an AND gate whose two input wires are chained from existing circuits.

    Allocates the next garbled-circuit id, appends one CHAIN instruction per
    input (wire ``idx0`` of ``circ0`` -> wire 0, wire ``idx1`` of ``circ1``
    -> wire 1), then an EVAL instruction for the new gate. Returns the id of
    the newly allocated circuit.
    """
    new_gc_id = ret_dict['circs_used']
    ret_dict['circs_used'] += 1

    # One CHAIN instruction per input wire of the AND gate.
    sources = [(circ0, idx0), (circ1, idx1)]
    for to_wire, (from_circ, from_wire) in enumerate(sources):
        chain = OrderedDict()
        chain['type'] = 'CHAIN'
        chain['from_gc_id'] = from_circ
        chain['from_wire_id_start'] = from_wire
        chain['from_wire_id_end'] = from_wire
        chain['to_gc_id'] = new_gc_id
        chain['to_wire_id_start'] = to_wire
        chain['to_wire_id_end'] = to_wire
        ret_dict['instructions'].append(chain)

    # Evaluate the freshly chained gate.
    ev = OrderedDict()
    ev['type'] = 'EVAL'
    ev['gc_id'] = new_gc_id
    ret_dict['instructions'].append(ev)
    return new_gc_id
Adds an AND gate where both inputs come from a circuit
extra_scripts/and_json.py
addANDChained
aled1027/2pc
1
python
def addANDChained(ret_dict, circ0, idx0, circ1, idx1): '\n \n ' this_gc_id = ret_dict['circs_used'] ret_dict['circs_used'] += 1 r = OrderedDict() r['type'] = 'CHAIN' r['from_gc_id'] = circ0 r['from_wire_id_start'] = idx0 r['from_wire_id_end'] = idx0 r['to_gc_id'] = this_gc_id r['to_wire_id_start'] = 0 r['to_wire_id_end'] = 0 ret_dict['instructions'].append(r) r = OrderedDict() r['type'] = 'CHAIN' r['from_gc_id'] = circ1 r['from_wire_id_start'] = idx1 r['from_wire_id_end'] = idx1 r['to_gc_id'] = this_gc_id r['to_wire_id_start'] = 1 r['to_wire_id_end'] = 1 ret_dict['instructions'].append(r) r = OrderedDict() r['type'] = 'EVAL' r['gc_id'] = this_gc_id ret_dict['instructions'].append(r) return this_gc_id
def addANDChained(ret_dict, circ0, idx0, circ1, idx1): '\n \n ' this_gc_id = ret_dict['circs_used'] ret_dict['circs_used'] += 1 r = OrderedDict() r['type'] = 'CHAIN' r['from_gc_id'] = circ0 r['from_wire_id_start'] = idx0 r['from_wire_id_end'] = idx0 r['to_gc_id'] = this_gc_id r['to_wire_id_start'] = 0 r['to_wire_id_end'] = 0 ret_dict['instructions'].append(r) r = OrderedDict() r['type'] = 'CHAIN' r['from_gc_id'] = circ1 r['from_wire_id_start'] = idx1 r['from_wire_id_end'] = idx1 r['to_gc_id'] = this_gc_id r['to_wire_id_start'] = 1 r['to_wire_id_end'] = 1 ret_dict['instructions'].append(r) r = OrderedDict() r['type'] = 'EVAL' r['gc_id'] = this_gc_id ret_dict['instructions'].append(r) return this_gc_id<|docstring|>Adds an AND gate where both inputs come from a circuit<|endoftext|>
b52c739deed69939945493a31bf0710a5e2dd3fef61129357868b03bef32d345
def test_create_user_with_email_successful(self):
    """Test creating a new user with an email is successful."""
    credentials = {
        'email': 'example@example.com',
        'password': 'testpass123',
    }
    new_user = get_user_model().objects.create_user(
        credentials['email'], credentials['password']
    )
    self.assertEqual(new_user.email, credentials['email'])
    self.assertTrue(new_user.check_password(credentials['password']))
Test creating a new user with an email is successful
app/core/tests/test_models.py
test_create_user_with_email_successful
EmilzonJ/recipe-app-api
0
python
def test_create_user_with_email_successful(self): email = 'example@example.com' password = 'testpass123' user = get_user_model().objects.create_user(email, password) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password))
def test_create_user_with_email_successful(self): email = 'example@example.com' password = 'testpass123' user = get_user_model().objects.create_user(email, password) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password))<|docstring|>Test creating a new user with an email is successful<|endoftext|>
a1ce4b3a515def5a8503d0150b4e56c7a587c32d791e637c303297ad5f29870c
def test_new_user_email_normalized(self):
    """Test the email for a new user is normalized.

    Uses an upper-case domain so the assertion actually exercises
    normalization; with an already-lowercase address the original check
    passed vacuously.
    """
    email = 'example@EXAMPLE.COM'
    user = get_user_model().objects.create_user(email, 'test123')
    # Django's normalize_email lower-cases the domain part of the address.
    self.assertEqual(user.email, email.lower())
Test the email for a new user is normalized
app/core/tests/test_models.py
test_new_user_email_normalized
EmilzonJ/recipe-app-api
0
python
def test_new_user_email_normalized(self): email = 'example@example.com' user = get_user_model().objects.create_user(email, 'test123') self.assertEqual(user.email, email.lower())
def test_new_user_email_normalized(self): email = 'example@example.com' user = get_user_model().objects.create_user(email, 'test123') self.assertEqual(user.email, email.lower())<|docstring|>Test the email for a new user is normalized<|endoftext|>
7da94b3b00879ffd63c0f7770685fb90aa46e562e2c04cb515b4a962e39b4850
def test_new_user_invalid_email(self):
    """Creating a user without an email address must raise ValueError."""
    create_user = get_user_model().objects.create_user
    with self.assertRaises(ValueError):
        create_user(None, 'test123')
Test creating user with no email raises error
app/core/tests/test_models.py
test_new_user_invalid_email
EmilzonJ/recipe-app-api
0
python
def test_new_user_invalid_email(self): with self.assertRaises(ValueError): get_user_model().objects.create_user(None, 'test123')
def test_new_user_invalid_email(self): with self.assertRaises(ValueError): get_user_model().objects.create_user(None, 'test123')<|docstring|>Test creating user with no email raises error<|endoftext|>
5313ae00829bb9f4c4c9f1501c3b8598838d7ea285db6296579a345bc07d84eb
def test_create_new_superuser(self):
    """A new superuser gets both the staff and superuser flags set."""
    admin = get_user_model().objects.create_superuser(
        email='example@example.com',
        password='test123',
    )
    self.assertTrue(admin.is_staff)
    self.assertTrue(admin.is_superuser)
Test creating a new superuser
app/core/tests/test_models.py
test_create_new_superuser
EmilzonJ/recipe-app-api
0
python
def test_create_new_superuser(self): user = get_user_model().objects.create_superuser(email='example@example.com', password='test123') self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
def test_create_new_superuser(self): user = get_user_model().objects.create_superuser(email='example@example.com', password='test123') self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)<|docstring|>Test creating a new superuser<|endoftext|>
0de6d45dfcf0017007b13db9a7b155f9c53d3aee2afaa2b8f4d4b9862783efb8
def memoize(func):
    """Decorate *func* to memoize its results in a thread-safe cache.

    The first call for a given argument combination computes and stores the
    result under double-checked locking; later calls return the cached value
    without taking the lock. The cache is exposed as ``func.cache`` for
    inspection or clearing.

    Fix over the original: keyword arguments are sorted by name when building
    the cache key, so ``f(a=1, b=2)`` and ``f(b=2, a=1)`` share one entry
    instead of missing the cache on keyword order.

    Note: keys are built from ``str()`` of the arguments, so arguments must
    have stable, distinguishing string representations.
    """
    cache = func.cache = {}
    lock = RLock()

    @functools.wraps(func)
    def memoized_func(*args, **kwargs):
        # Sort kwargs so equivalent calls with different keyword order
        # produce the same key.
        key = str(args) + str(sorted(kwargs.items()))
        if key not in cache:
            # Double-checked locking: only lock (and re-check) on a miss so
            # the hot hit path stays lock-free.
            with lock:
                if key not in cache:
                    cache[key] = func(*args, **kwargs)
        return cache[key]

    return memoized_func
Decorate function to memoize first call to thread safe cache.
modules/ducktests/tests/ignitetest/services/utils/decorators.py
memoize
brat-kuzma/ignite
4,339
python
def memoize(func): '\n \n ' cache = func.cache = {} lock = RLock() @functools.wraps(func) def memoized_func(*args, **kwargs): key = (str(args) + str(kwargs)) if (key not in cache): with lock: if (key not in cache): cache[key] = func(*args, **kwargs) return cache[key] return memoized_func
def memoize(func): '\n \n ' cache = func.cache = {} lock = RLock() @functools.wraps(func) def memoized_func(*args, **kwargs): key = (str(args) + str(kwargs)) if (key not in cache): with lock: if (key not in cache): cache[key] = func(*args, **kwargs) return cache[key] return memoized_func<|docstring|>Decorate function to memoize first call to thread safe cache.<|endoftext|>
5e16837d4a3c52768a4dd6b29a29eb11ce0f90a5ffd7b6d756f6457e493c26f4
def load_obj(filename: str, obj_group: bool=True, flip_tex_coords: bool=True, use_common_indices: bool=False, return_objects: bool=False):
    """Load from a Wavefront obj file as TensorFlow tensors.

    (Docstring fix: the code builds ``tf.constant`` tensors, not PyTorch
    tensors as the original text claimed.)

    Args
    ====
    obj_group: bool
        split the meshes based on materials
    flip_tex_coords: bool
        flip the v coordinate of uv by applying v' = 1 - v
    use_common_indices: bool
        Use the same indices for position, uvs, normals.
        Not recommended since texture seams in the objects sharing
        the same positions would cause the optimization to "tear" the object
    return_objects: bool
        Output list of Object instead.
        If there is no corresponding material for a shape, assign a grey material.

    Returns
    =======
    if return_objects == True, return a list of Object
    if return_objects == False, return (material_map, mesh_list, light_map),
    material_map -> Map[mtl_name, WavefrontMaterial]
    mesh_list -> List[TriangleMesh]
    light_map -> Map[mtl_name, tf.Tensor]
    """
    # Raw pools of all v/vt/vn records in file order (OBJ indices refer here).
    vertices_pool = []
    uvs_pool = []
    normals_pool = []
    # Per-mesh accumulators, flushed into mesh_list on each 'usemtl'.
    indices = []
    uv_indices = []
    normal_indices = []
    vertices = []
    uvs = []
    normals = []
    # Maps from pool index (or (pos, uv, normal) triple) to per-mesh index,
    # used to deduplicate shared vertices.
    vertices_map = {}
    uvs_map = {}
    normals_map = {}
    material_map = {}
    current_mtllib = {}
    current_material_name = None

    def create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals):
        # Convert the accumulated Python lists into a TriangleMesh of
        # tf tensors; empty optional channels become None.
        indices = tf.constant(indices, dtype=tf.int32)
        if (len(uv_indices) == 0):
            uv_indices = None
        else:
            uv_indices = tf.constant(uv_indices, dtype=tf.int32)
        if (len(normal_indices) == 0):
            normal_indices = None
        else:
            normal_indices = tf.constant(normal_indices, dtype=tf.int32)
        vertices = tf.constant(vertices)
        if (len(uvs) == 0):
            uvs = None
        else:
            uvs = tf.constant(uvs)
        if (len(normals) == 0):
            normals = None
        else:
            normals = tf.constant(normals)
        return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs, normals)

    mesh_list = []
    light_map = {}
    with open(filename, 'r') as f:
        # chdir into the OBJ's directory so relative mtllib/texture paths
        # resolve; restored after parsing.
        d = os.path.dirname(filename)
        cwd = os.getcwd()
        if (d != ''):
            os.chdir(d)
        for line in f:
            line = line.strip()
            splitted = re.split('\\ +', line)
            if (splitted[0] == 'mtllib'):
                current_mtllib = load_mtl(splitted[1])
            elif (splitted[0] == 'usemtl'):
                if ((len(indices) > 0) and (obj_group is True)):
                    # Flush the mesh accumulated under the previous material.
                    mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals)))
                    indices = []
                    uv_indices = []
                    normal_indices = []
                    vertices = []
                    normals = []
                    uvs = []
                    vertices_map = {}
                    uvs_map = {}
                    normals_map = {}
                mtl_name = splitted[1]
                current_material_name = mtl_name
                if (mtl_name not in material_map):
                    # Materialize the Wavefront material as tensors/textures.
                    m = current_mtllib[mtl_name]
                    if (m.map_Kd is None):
                        diffuse_reflectance = tf.constant(m.Kd, dtype=tf.float32)
                    else:
                        diffuse_reflectance = pyredner.imread(m.map_Kd)
                    if (m.map_Ks is None):
                        specular_reflectance = tf.constant(m.Ks, dtype=tf.float32)
                    else:
                        specular_reflectance = pyredner.imread(m.map_Ks)
                    if (m.map_Ns is None):
                        # Convert Phong exponent Ns to roughness = 2/(Ns+2).
                        roughness = tf.constant([(2.0 / (m.Ns + 2.0))], dtype=tf.float32)
                    else:
                        roughness = (2.0 / (pyredner.imread(m.map_Ns) + 2.0))
                    if (m.Ke != (0.0, 0.0, 0.0)):
                        # Non-zero emission: this material is a light source.
                        light_map[mtl_name] = tf.constant(m.Ke, dtype=tf.float32)
                    material_map[mtl_name] = pyredner.Material(diffuse_reflectance, specular_reflectance, roughness)
            elif (splitted[0] == 'v'):
                vertices_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
            elif (splitted[0] == 'vt'):
                u = float(splitted[1])
                v = float(splitted[2])
                if flip_tex_coords:
                    v = (1 - v)
                uvs_pool.append([u, v])
            elif (splitted[0] == 'vn'):
                normals_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])])
            elif (splitted[0] == 'f'):
                def num_indices(x):
                    return len(re.split('/', x))
                def get_index(x, i):
                    return int(re.split('/', x)[i])
                def parse_face_index(x, i):
                    # OBJ indices are 1-based; convert positive indices to
                    # 0-based (negative, relative indices pass through).
                    f = get_index(x, i)
                    if (f > 0):
                        f -= 1
                    return f
                # Only triangles and quads are supported.
                assert (len(splitted) <= 5)
                def get_vertex_id(indices):
                    # Parse one 'v/vt/vn' token and return the per-mesh
                    # (position, uv, normal) index triple, deduplicating
                    # previously seen vertices via the *_map dicts.
                    pi = parse_face_index(indices, 0)
                    uvi = None
                    if ((num_indices(indices) > 1) and (re.split('/', indices)[1] != '')):
                        uvi = parse_face_index(indices, 1)
                    ni = None
                    if ((num_indices(indices) > 2) and (re.split('/', indices)[2] != '')):
                        ni = parse_face_index(indices, 2)
                    if use_common_indices:
                        # One shared index for position/uv/normal, keyed on
                        # the full triple.
                        key = (pi, uvi, ni)
                        if (key in vertices_map):
                            vertex_id = vertices_map[key]
                            return (vertex_id, vertex_id, vertex_id)
                        vertex_id = len(vertices)
                        vertices_map[key] = vertex_id
                        vertices.append(vertices_pool[pi])
                        if (uvi is not None):
                            uvs.append(uvs_pool[uvi])
                        if (ni is not None):
                            normals.append(normals_pool[ni])
                        return (vertex_id, vertex_id, vertex_id)
                    else:
                        # Independent index streams for position/uv/normal.
                        vertex_id = None
                        uv_id = None
                        normal_id = None
                        if (pi in vertices_map):
                            vertex_id = vertices_map[pi]
                        else:
                            vertex_id = len(vertices)
                            vertices.append(vertices_pool[pi])
                            vertices_map[pi] = vertex_id
                        if (uvi is not None):
                            if (uvi in uvs_map):
                                uv_id = uvs_map[uvi]
                            else:
                                uv_id = len(uvs)
                                uvs.append(uvs_pool[uvi])
                                uvs_map[uvi] = uv_id
                        if (ni is not None):
                            if (ni in normals_map):
                                normal_id = normals_map[ni]
                            else:
                                normal_id = len(normals)
                                normals.append(normals_pool[ni])
                                normals_map[ni] = normal_id
                        return (vertex_id, uv_id, normal_id)
                (vid0, uv_id0, n_id0) = get_vertex_id(splitted[1])
                (vid1, uv_id1, n_id1) = get_vertex_id(splitted[2])
                (vid2, uv_id2, n_id2) = get_vertex_id(splitted[3])
                indices.append([vid0, vid1, vid2])
                if (uv_id0 is not None):
                    assert ((uv_id1 is not None) and (uv_id2 is not None))
                    uv_indices.append([uv_id0, uv_id1, uv_id2])
                if (n_id0 is not None):
                    assert ((n_id1 is not None) and (n_id2 is not None))
                    normal_indices.append([n_id0, n_id1, n_id2])
                if (len(splitted) == 5):
                    # Quad face: triangulate with the second triangle
                    # (v0, v2, v3).
                    (vid3, uv_id3, n_id3) = get_vertex_id(splitted[4])
                    indices.append([vid0, vid2, vid3])
                    if (uv_id0 is not None):
                        assert (uv_id3 is not None)
                        uv_indices.append([uv_id0, uv_id2, uv_id3])
                    if (n_id0 is not None):
                        assert (n_id3 is not None)
                        normal_indices.append([n_id0, n_id2, n_id3])
    # Flush the final (or only) mesh.
    mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals)))
    if (d != ''):
        os.chdir(cwd)
    if return_objects:
        objects = []
        for (mtl_name, mesh) in mesh_list:
            if (mtl_name in material_map):
                m = material_map[mtl_name]
            else:
                # No material recorded for this mesh: fall back to grey.
                m = pyredner.Material(diffuse_reflectance=tf.constant((0.5, 0.5, 0.5)))
            if (mtl_name in light_map):
                l = light_map[mtl_name]
            else:
                l = None
            objects.append(pyredner.Object(vertices=mesh.vertices, indices=mesh.indices, material=m, light_intensity=l, uvs=mesh.uvs, normals=mesh.normals, uv_indices=mesh.uv_indices, normal_indices=mesh.normal_indices))
        return objects
    else:
        return (material_map, mesh_list, light_map)
Load from a Wavefront obj file as PyTorch tensors. Args ==== obj_group: bool split the meshes based on materials flip_tex_coords: bool flip the v coordinate of uv by applying v' = 1 - v use_common_indices: bool Use the same indices for position, uvs, normals. Not recommended since texture seams in the objects sharing the same positions would cause the optimization to "tear" the object return_objects: bool Output list of Object instead. If there is no corresponding material for a shape, assign a grey material. Returns ======= if return_objects == True, return a list of Object if return_objects == False, return (material_map, mesh_list, light_map), material_map -> Map[mtl_name, WavefrontMaterial] mesh_list -> List[TriangleMesh] light_map -> Map[mtl_name, torch.Tensor]
pyredner_tensorflow/load_obj.py
load_obj
sgrabli/redner
1,146
python
def load_obj(filename: str, obj_group: bool=True, flip_tex_coords: bool=True, use_common_indices: bool=False, return_objects: bool=False): '\n Load from a Wavefront obj file as PyTorch tensors.\n\n Args\n ====\n obj_group: bool\n split the meshes based on materials\n flip_tex_coords: bool\n flip the v coordinate of uv by applying v\' = 1 - v\n use_common_indices: bool\n Use the same indices for position, uvs, normals.\n Not recommended since texture seams in the objects sharing\n the same positions would cause the optimization to "tear" the object\n return_objects: bool\n Output list of Object instead.\n If there is no corresponding material for a shape, assign a grey material.\n\n Returns\n =======\n if return_objects == True, return a list of Object\n if return_objects == False, return (material_map, mesh_list, light_map),\n material_map -> Map[mtl_name, WavefrontMaterial]\n mesh_list -> List[TriangleMesh]\n light_map -> Map[mtl_name, torch.Tensor]\n ' vertices_pool = [] uvs_pool = [] normals_pool = [] indices = [] uv_indices = [] normal_indices = [] vertices = [] uvs = [] normals = [] vertices_map = {} uvs_map = {} normals_map = {} material_map = {} current_mtllib = {} current_material_name = None def create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals): indices = tf.constant(indices, dtype=tf.int32) if (len(uv_indices) == 0): uv_indices = None else: uv_indices = tf.constant(uv_indices, dtype=tf.int32) if (len(normal_indices) == 0): normal_indices = None else: normal_indices = tf.constant(normal_indices, dtype=tf.int32) vertices = tf.constant(vertices) if (len(uvs) == 0): uvs = None else: uvs = tf.constant(uvs) if (len(normals) == 0): normals = None else: normals = tf.constant(normals) return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs, normals) mesh_list = [] light_map = {} with open(filename, 'r') as f: d = os.path.dirname(filename) cwd = os.getcwd() if (d != ): os.chdir(d) for line in f: line = line.strip() splitted = 
re.split('\\ +', line) if (splitted[0] == 'mtllib'): current_mtllib = load_mtl(splitted[1]) elif (splitted[0] == 'usemtl'): if ((len(indices) > 0) and (obj_group is True)): mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals))) indices = [] uv_indices = [] normal_indices = [] vertices = [] normals = [] uvs = [] vertices_map = {} uvs_map = {} normals_map = {} mtl_name = splitted[1] current_material_name = mtl_name if (mtl_name not in material_map): m = current_mtllib[mtl_name] if (m.map_Kd is None): diffuse_reflectance = tf.constant(m.Kd, dtype=tf.float32) else: diffuse_reflectance = pyredner.imread(m.map_Kd) if (m.map_Ks is None): specular_reflectance = tf.constant(m.Ks, dtype=tf.float32) else: specular_reflectance = pyredner.imread(m.map_Ks) if (m.map_Ns is None): roughness = tf.constant([(2.0 / (m.Ns + 2.0))], dtype=tf.float32) else: roughness = (2.0 / (pyredner.imread(m.map_Ns) + 2.0)) if (m.Ke != (0.0, 0.0, 0.0)): light_map[mtl_name] = tf.constant(m.Ke, dtype=tf.float32) material_map[mtl_name] = pyredner.Material(diffuse_reflectance, specular_reflectance, roughness) elif (splitted[0] == 'v'): vertices_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])]) elif (splitted[0] == 'vt'): u = float(splitted[1]) v = float(splitted[2]) if flip_tex_coords: v = (1 - v) uvs_pool.append([u, v]) elif (splitted[0] == 'vn'): normals_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])]) elif (splitted[0] == 'f'): def num_indices(x): return len(re.split('/', x)) def get_index(x, i): return int(re.split('/', x)[i]) def parse_face_index(x, i): f = get_index(x, i) if (f > 0): f -= 1 return f assert (len(splitted) <= 5) def get_vertex_id(indices): pi = parse_face_index(indices, 0) uvi = None if ((num_indices(indices) > 1) and (re.split('/', indices)[1] != )): uvi = parse_face_index(indices, 1) ni = None if ((num_indices(indices) > 2) and (re.split('/', indices)[2] != )): ni = 
parse_face_index(indices, 2) if use_common_indices: key = (pi, uvi, ni) if (key in vertices_map): vertex_id = vertices_map[key] return (vertex_id, vertex_id, vertex_id) vertex_id = len(vertices) vertices_map[key] = vertex_id vertices.append(vertices_pool[pi]) if (uvi is not None): uvs.append(uvs_pool[uvi]) if (ni is not None): normals.append(normals_pool[ni]) return (vertex_id, vertex_id, vertex_id) else: vertex_id = None uv_id = None normal_id = None if (pi in vertices_map): vertex_id = vertices_map[pi] else: vertex_id = len(vertices) vertices.append(vertices_pool[pi]) vertices_map[pi] = vertex_id if (uvi is not None): if (uvi in uvs_map): uv_id = uvs_map[uvi] else: uv_id = len(uvs) uvs.append(uvs_pool[uvi]) uvs_map[uvi] = uv_id if (ni is not None): if (ni in normals_map): normal_id = normals_map[ni] else: normal_id = len(normals) normals.append(normals_pool[ni]) normals_map[ni] = normal_id return (vertex_id, uv_id, normal_id) (vid0, uv_id0, n_id0) = get_vertex_id(splitted[1]) (vid1, uv_id1, n_id1) = get_vertex_id(splitted[2]) (vid2, uv_id2, n_id2) = get_vertex_id(splitted[3]) indices.append([vid0, vid1, vid2]) if (uv_id0 is not None): assert ((uv_id1 is not None) and (uv_id2 is not None)) uv_indices.append([uv_id0, uv_id1, uv_id2]) if (n_id0 is not None): assert ((n_id1 is not None) and (n_id2 is not None)) normal_indices.append([n_id0, n_id1, n_id2]) if (len(splitted) == 5): (vid3, uv_id3, n_id3) = get_vertex_id(splitted[4]) indices.append([vid0, vid2, vid3]) if (uv_id0 is not None): assert (uv_id3 is not None) uv_indices.append([uv_id0, uv_id2, uv_id3]) if (n_id0 is not None): assert (n_id3 is not None) normal_indices.append([n_id0, n_id2, n_id3]) mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals))) if (d != ): os.chdir(cwd) if return_objects: objects = [] for (mtl_name, mesh) in mesh_list: if (mtl_name in material_map): m = material_map[mtl_name] else: m = 
pyredner.Material(diffuse_reflectance=tf.constant((0.5, 0.5, 0.5))) if (mtl_name in light_map): l = light_map[mtl_name] else: l = None objects.append(pyredner.Object(vertices=mesh.vertices, indices=mesh.indices, material=m, light_intensity=l, uvs=mesh.uvs, normals=mesh.normals, uv_indices=mesh.uv_indices, normal_indices=mesh.normal_indices)) return objects else: return (material_map, mesh_list, light_map)
def load_obj(filename: str, obj_group: bool=True, flip_tex_coords: bool=True, use_common_indices: bool=False, return_objects: bool=False): '\n Load from a Wavefront obj file as PyTorch tensors.\n\n Args\n ====\n obj_group: bool\n split the meshes based on materials\n flip_tex_coords: bool\n flip the v coordinate of uv by applying v\' = 1 - v\n use_common_indices: bool\n Use the same indices for position, uvs, normals.\n Not recommended since texture seams in the objects sharing\n the same positions would cause the optimization to "tear" the object\n return_objects: bool\n Output list of Object instead.\n If there is no corresponding material for a shape, assign a grey material.\n\n Returns\n =======\n if return_objects == True, return a list of Object\n if return_objects == False, return (material_map, mesh_list, light_map),\n material_map -> Map[mtl_name, WavefrontMaterial]\n mesh_list -> List[TriangleMesh]\n light_map -> Map[mtl_name, torch.Tensor]\n ' vertices_pool = [] uvs_pool = [] normals_pool = [] indices = [] uv_indices = [] normal_indices = [] vertices = [] uvs = [] normals = [] vertices_map = {} uvs_map = {} normals_map = {} material_map = {} current_mtllib = {} current_material_name = None def create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals): indices = tf.constant(indices, dtype=tf.int32) if (len(uv_indices) == 0): uv_indices = None else: uv_indices = tf.constant(uv_indices, dtype=tf.int32) if (len(normal_indices) == 0): normal_indices = None else: normal_indices = tf.constant(normal_indices, dtype=tf.int32) vertices = tf.constant(vertices) if (len(uvs) == 0): uvs = None else: uvs = tf.constant(uvs) if (len(normals) == 0): normals = None else: normals = tf.constant(normals) return TriangleMesh(indices, uv_indices, normal_indices, vertices, uvs, normals) mesh_list = [] light_map = {} with open(filename, 'r') as f: d = os.path.dirname(filename) cwd = os.getcwd() if (d != ): os.chdir(d) for line in f: line = line.strip() splitted = 
re.split('\\ +', line) if (splitted[0] == 'mtllib'): current_mtllib = load_mtl(splitted[1]) elif (splitted[0] == 'usemtl'): if ((len(indices) > 0) and (obj_group is True)): mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals))) indices = [] uv_indices = [] normal_indices = [] vertices = [] normals = [] uvs = [] vertices_map = {} uvs_map = {} normals_map = {} mtl_name = splitted[1] current_material_name = mtl_name if (mtl_name not in material_map): m = current_mtllib[mtl_name] if (m.map_Kd is None): diffuse_reflectance = tf.constant(m.Kd, dtype=tf.float32) else: diffuse_reflectance = pyredner.imread(m.map_Kd) if (m.map_Ks is None): specular_reflectance = tf.constant(m.Ks, dtype=tf.float32) else: specular_reflectance = pyredner.imread(m.map_Ks) if (m.map_Ns is None): roughness = tf.constant([(2.0 / (m.Ns + 2.0))], dtype=tf.float32) else: roughness = (2.0 / (pyredner.imread(m.map_Ns) + 2.0)) if (m.Ke != (0.0, 0.0, 0.0)): light_map[mtl_name] = tf.constant(m.Ke, dtype=tf.float32) material_map[mtl_name] = pyredner.Material(diffuse_reflectance, specular_reflectance, roughness) elif (splitted[0] == 'v'): vertices_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])]) elif (splitted[0] == 'vt'): u = float(splitted[1]) v = float(splitted[2]) if flip_tex_coords: v = (1 - v) uvs_pool.append([u, v]) elif (splitted[0] == 'vn'): normals_pool.append([float(splitted[1]), float(splitted[2]), float(splitted[3])]) elif (splitted[0] == 'f'): def num_indices(x): return len(re.split('/', x)) def get_index(x, i): return int(re.split('/', x)[i]) def parse_face_index(x, i): f = get_index(x, i) if (f > 0): f -= 1 return f assert (len(splitted) <= 5) def get_vertex_id(indices): pi = parse_face_index(indices, 0) uvi = None if ((num_indices(indices) > 1) and (re.split('/', indices)[1] != )): uvi = parse_face_index(indices, 1) ni = None if ((num_indices(indices) > 2) and (re.split('/', indices)[2] != )): ni = 
parse_face_index(indices, 2) if use_common_indices: key = (pi, uvi, ni) if (key in vertices_map): vertex_id = vertices_map[key] return (vertex_id, vertex_id, vertex_id) vertex_id = len(vertices) vertices_map[key] = vertex_id vertices.append(vertices_pool[pi]) if (uvi is not None): uvs.append(uvs_pool[uvi]) if (ni is not None): normals.append(normals_pool[ni]) return (vertex_id, vertex_id, vertex_id) else: vertex_id = None uv_id = None normal_id = None if (pi in vertices_map): vertex_id = vertices_map[pi] else: vertex_id = len(vertices) vertices.append(vertices_pool[pi]) vertices_map[pi] = vertex_id if (uvi is not None): if (uvi in uvs_map): uv_id = uvs_map[uvi] else: uv_id = len(uvs) uvs.append(uvs_pool[uvi]) uvs_map[uvi] = uv_id if (ni is not None): if (ni in normals_map): normal_id = normals_map[ni] else: normal_id = len(normals) normals.append(normals_pool[ni]) normals_map[ni] = normal_id return (vertex_id, uv_id, normal_id) (vid0, uv_id0, n_id0) = get_vertex_id(splitted[1]) (vid1, uv_id1, n_id1) = get_vertex_id(splitted[2]) (vid2, uv_id2, n_id2) = get_vertex_id(splitted[3]) indices.append([vid0, vid1, vid2]) if (uv_id0 is not None): assert ((uv_id1 is not None) and (uv_id2 is not None)) uv_indices.append([uv_id0, uv_id1, uv_id2]) if (n_id0 is not None): assert ((n_id1 is not None) and (n_id2 is not None)) normal_indices.append([n_id0, n_id1, n_id2]) if (len(splitted) == 5): (vid3, uv_id3, n_id3) = get_vertex_id(splitted[4]) indices.append([vid0, vid2, vid3]) if (uv_id0 is not None): assert (uv_id3 is not None) uv_indices.append([uv_id0, uv_id2, uv_id3]) if (n_id0 is not None): assert (n_id3 is not None) normal_indices.append([n_id0, n_id2, n_id3]) mesh_list.append((current_material_name, create_mesh(indices, uv_indices, normal_indices, vertices, uvs, normals))) if (d != ): os.chdir(cwd) if return_objects: objects = [] for (mtl_name, mesh) in mesh_list: if (mtl_name in material_map): m = material_map[mtl_name] else: m = 
pyredner.Material(diffuse_reflectance=tf.constant((0.5, 0.5, 0.5))) if (mtl_name in light_map): l = light_map[mtl_name] else: l = None objects.append(pyredner.Object(vertices=mesh.vertices, indices=mesh.indices, material=m, light_intensity=l, uvs=mesh.uvs, normals=mesh.normals, uv_indices=mesh.uv_indices, normal_indices=mesh.normal_indices)) return objects else: return (material_map, mesh_list, light_map)<|docstring|>Load from a Wavefront obj file as PyTorch tensors. Args ==== obj_group: bool split the meshes based on materials flip_tex_coords: bool flip the v coordinate of uv by applying v' = 1 - v use_common_indices: bool Use the same indices for position, uvs, normals. Not recommended since texture seams in the objects sharing the same positions would cause the optimization to "tear" the object return_objects: bool Output list of Object instead. If there is no corresponding material for a shape, assign a grey material. Returns ======= if return_objects == True, return a list of Object if return_objects == False, return (material_map, mesh_list, light_map), material_map -> Map[mtl_name, WavefrontMaterial] mesh_list -> List[TriangleMesh] light_map -> Map[mtl_name, torch.Tensor]<|endoftext|>
75db80751247eb8522808906d493077b55c010abf3799e2141da8f8894bc771b
def project_3d_to_2d(cam_f, cam_c, verts): '\n project 3d points to original 2d coordinate space.\n Input:\n cam: (1, 3) camera parameters (f, cx, cy) output by model.\n verts: 3d verts output by model.\n proc_param: preprocessing parameters. this is for converting points from crop (model input) to original image.\n Output:\n ' fx = cam_f[0] fy = cam_f[1] tx = cam_c[0] ty = cam_c[1] verts = verts.reshape((- 1), 3) verts2d = np.zeros((verts.shape[0], 2)) print(verts2d.shape) verts2d[(:, 0)] = (((fx * verts[(:, 0)]) / verts[(:, 2)]) + tx) verts2d[(:, 1)] = (((fy * verts[(:, 1)]) / verts[(:, 2)]) + ty) return verts2d
project 3d points to original 2d coordinate space. Input: cam: (1, 3) camera parameters (f, cx, cy) output by model. verts: 3d verts output by model. proc_param: preprocessing parameters. this is for converting points from crop (model input) to original image. Output:
genmotion/algorithm/action2motion/utils/matrix_transformer.py
project_3d_to_2d
yizhouzhao/GenMotion
32
python
def project_3d_to_2d(cam_f, cam_c, verts):
    """
    Project 3D points into the original 2D image coordinate space
    using a pinhole-camera model.

    Parameters
    ----------
    cam_f : (fx, fy) focal lengths in pixels.
    cam_c : (cx, cy) principal point (image centre) in pixels.
    verts : array-like of 3D vertices output by the model; reshaped to (N, 3).
        NOTE(review): assumes all z components are non-zero — confirm upstream.

    Returns
    -------
    (N, 2) array of projected 2D points:
        u = fx * x / z + cx
        v = fy * y / z + cy
    """
    fx = cam_f[0]
    fy = cam_f[1]
    tx = cam_c[0]
    ty = cam_c[1]
    verts = verts.reshape(-1, 3)
    verts2d = np.zeros((verts.shape[0], 2))
    # perspective divide per axis (fixed invalid `verts2d[(:, 0)]` subscript
    # syntax; removed the stray debug print of the output shape)
    verts2d[:, 0] = fx * verts[:, 0] / verts[:, 2] + tx
    verts2d[:, 1] = fy * verts[:, 1] / verts[:, 2] + ty
    return verts2d
def project_3d_to_2d(cam_f, cam_c, verts): '\n project 3d points to original 2d coordinate space.\n Input:\n cam: (1, 3) camera parameters (f, cx, cy) output by model.\n verts: 3d verts output by model.\n proc_param: preprocessing parameters. this is for converting points from crop (model input) to original image.\n Output:\n ' fx = cam_f[0] fy = cam_f[1] tx = cam_c[0] ty = cam_c[1] verts = verts.reshape((- 1), 3) verts2d = np.zeros((verts.shape[0], 2)) print(verts2d.shape) verts2d[(:, 0)] = (((fx * verts[(:, 0)]) / verts[(:, 2)]) + tx) verts2d[(:, 1)] = (((fy * verts[(:, 1)]) / verts[(:, 2)]) + ty) return verts2d<|docstring|>project 3d points to original 2d coordinate space. Input: cam: (1, 3) camera parameters (f, cx, cy) output by model. verts: 3d verts output by model. proc_param: preprocessing parameters. this is for converting points from crop (model input) to original image. Output:<|endoftext|>
0b9689dfb85d6926c18c50fb6ef9b1b392b9a84c6445d5d53576b29ddf69611c
@property def primary_group(self): 'Simply returns the first in `groups`, if any, else None.' return self.groups.first()
Simply returns the first in `groups`, if any, else None.
aldryn_people/models.py
primary_group
evgeny-dmi3ev/js-people
0
python
@property
def primary_group(self):
    """The first entry of `groups`, or ``None`` when there are none."""
    group_queryset = self.groups
    return group_queryset.first()
@property def primary_group(self): return self.groups.first()<|docstring|>Simply returns the first in `groups`, if any, else None.<|endoftext|>
4bb188c1407ec165de8c05b099797c598c1aed0f2502c35b46f6cd2dd3522a95
def init_population(data, centers_num=12, n=20): '\n Choose randomly genes for chromosomes between minima and maxima of \n corresponding genes in data\n\n data : data to get minima and maxima for generation\n centers_num : number of centroids per individual to generate\n n : number of indiwiduals\n\n return : initial population pop_0, maxima and minima for mutation\n ' logging.info('initialize population...') data = np.array(data) maxima = data.max(axis=0) minima = data.min(axis=0) pop = (((maxima - minima) * np.random.random_sample((n, centers_num, len(data[0])))) + minima) logging.debug('init_population - end') return (pop, maxima, minima)
Choose randomly genes for chromosomes between minima and maxima of corresponding genes in data data : data to get minima and maxima for generation centers_num : number of centroids per individual to generate n : number of indiwiduals return : initial population pop_0, maxima and minima for mutation
evolclust/evolution.py
init_population
maks-ym/clustering-wtih-ewolution-strategy
0
python
def init_population(data, centers_num=12, n=20):
    """
    Draw a random initial population of centroid sets.

    Every gene of every chromosome is sampled uniformly between the
    minimum and maximum of the corresponding feature observed in ``data``.

    data : samples used to derive per-feature minima and maxima
    centers_num : number of centroids per individual
    n : number of individuals in the population

    return : tuple (pop_0, maxima, minima) — the initial population of
        shape (n, centers_num, n_features) together with the per-feature
        bounds, which callers reuse later for mutation.
    """
    logging.info('initialize population...')
    samples = np.asarray(data)
    upper = samples.max(axis=0)
    lower = samples.min(axis=0)
    gene_count = len(samples[0])
    # uniform noise in [0, 1), scaled and shifted into [lower, upper)
    noise = np.random.random_sample((n, centers_num, gene_count))
    pop = lower + (upper - lower) * noise
    logging.debug('init_population - end')
    return (pop, upper, lower)
def init_population(data, centers_num=12, n=20): '\n Choose randomly genes for chromosomes between minima and maxima of \n corresponding genes in data\n\n data : data to get minima and maxima for generation\n centers_num : number of centroids per individual to generate\n n : number of indiwiduals\n\n return : initial population pop_0, maxima and minima for mutation\n ' logging.info('initialize population...') data = np.array(data) maxima = data.max(axis=0) minima = data.min(axis=0) pop = (((maxima - minima) * np.random.random_sample((n, centers_num, len(data[0])))) + minima) logging.debug('init_population - end') return (pop, maxima, minima)<|docstring|>Choose randomly genes for chromosomes between minima and maxima of corresponding genes in data data : data to get minima and maxima for generation centers_num : number of centroids per individual to generate n : number of indiwiduals return : initial population pop_0, maxima and minima for mutation<|endoftext|>
57d8a1d5f3409e52ae485c39025b0c7dc8389f223728b1dea09a78d99df55ae4
def get_adapt_scores(pop_t, data, true_labs, adapt_function='silhouette', dist_measure='euclidean', loggin_pref=''):
    """
    Cluster ``data`` with each individual (centroid set) in the population
    and score the resulting clusterings.

    pop_t : population to score; each individual is one set of centroids
    data : samples to cluster
    true_labs : ground-truth labels for ``data``; used directly by
        'info_gain' and, for 'silhouette', only to penalise clusterings
        that produce fewer clusters than the true number of classes
    adapt_function : {"silhouette"|"info_gain"} score to compute
    dist_measure : {"euclidean"|"manhattan"|"cosine"} used for clustering
        and for the silhouette score
    loggin_pref : prefix prepended to every log message

    return : one score per individual; a numpy array rescaled to [0, 1]
        when adapt_function == 'silhouette' and len(pop_t) > 1, otherwise
        a plain list of raw scores
    """
    logging.debug('{}getting adapt scores...'.format(loggin_pref))
    # number of distinct ground-truth classes, used as the expected cluster count
    true_num = len(np.unique(true_labs))
    scores = []
    for (i, individual) in enumerate(pop_t):
        logging.debug('{}indiv {}: Clustering...'.format(loggin_pref, i))
        # assign every sample to its nearest centroid of this individual
        labs = cluster.Centroids.cluster(data, individual, dist_func=dist_measure)
        uniq_num = len(np.unique(labs))
        logging.debug('{}indiv {}: labs unique num: {}'.format(loggin_pref, i, uniq_num))
        if (adapt_function == 'silhouette'):
            # NOTE(review): assumes cluster.Evaluate.silhouette returns values
            # in [-1, 1] (standard silhouette range) — confirm in cluster module
            if (uniq_num == 1):
                # a single cluster has no defined silhouette; worst score
                cur_score = (- 1)
            elif (uniq_num < true_num):
                logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i))
                cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure)
                # penalise 0.1 per missing cluster, clamped to the -1 floor
                corrected_score = (cur_score - ((true_num - uniq_num) * 0.1))
                cur_score = max(corrected_score, (- 1))
            else:
                logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i))
                cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure)
            logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score))
            scores.append(cur_score)
        elif (adapt_function == 'info_gain'):
            logging.debug('{}indiv {}: computing info_gain...'.format(loggin_pref, i))
            # remap predicted cluster ids onto the true-label ids before scoring
            labs = cluster.Utils.adjust_labels(labs, true_labs)
            logging.debug('{}indiv {}: adjust_labels length: {}'.format(loggin_pref, i, len(labs)))
            cur_score = cluster.Evaluate.information_gain(true_labs, labs)
            logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score))
            scores.append(cur_score)
    if ((adapt_function == 'silhouette') and (len(pop_t) > 1)):
        scores = np.array(scores)
        uniq_scores = set(scores)
        if (len(uniq_scores) > 2):
            # soften the -1 sentinel scores: replace them with the
            # second-smallest distinct score so they do not dominate selection
            scores[(scores == (- 1))] = sorted(set(uniq_scores))[1]
        # rescale from [-1, 1] to [0, 1] for use as selection weights
        scores = ((scores + 1) / 2)
    logging.debug('{}getting adapt scores... DONE'.format(loggin_pref))
    return scores
Cluster data for eacn individual (centroids set) in population and get scores. pop_t : population to score true_labs : for data provided adapt_function : {"silhouette"|"info_gain"}; these values to be returned dist_measure : {"euclidean"|"manhattan"|"cosine"} used for clustering and silhouette score return : scores for each individual; 1-D array of length n (pop_t size)
evolclust/evolution.py
get_adapt_scores
maks-ym/clustering-wtih-ewolution-strategy
0
python
def get_adapt_scores(pop_t, data, true_labs, adapt_function='silhouette', dist_measure='euclidean', loggin_pref=): '\n Cluster data for eacn individual (centroids set) in population and get scores.\n\n pop_t : population to score\n true_labs : for data provided\n adapt_function : {"silhouette"|"info_gain"}; these values to be returned\n dist_measure : {"euclidean"|"manhattan"|"cosine"} used for clustering and silhouette score\n\n return : scores for each individual; 1-D array of length n (pop_t size)\n ' logging.debug('{}getting adapt scores...'.format(loggin_pref)) true_num = len(np.unique(true_labs)) scores = [] for (i, individual) in enumerate(pop_t): logging.debug('{}indiv {}: Clustering...'.format(loggin_pref, i)) labs = cluster.Centroids.cluster(data, individual, dist_func=dist_measure) uniq_num = len(np.unique(labs)) logging.debug('{}indiv {}: labs unique num: {}'.format(loggin_pref, i, uniq_num)) if (adapt_function == 'silhouette'): if (uniq_num == 1): cur_score = (- 1) elif (uniq_num < true_num): logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i)) cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure) corrected_score = (cur_score - ((true_num - uniq_num) * 0.1)) cur_score = max(corrected_score, (- 1)) else: logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i)) cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure) logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score)) scores.append(cur_score) elif (adapt_function == 'info_gain'): logging.debug('{}indiv {}: computing info_gain...'.format(loggin_pref, i)) labs = cluster.Utils.adjust_labels(labs, true_labs) logging.debug('{}indiv {}: adjust_labels length: {}'.format(loggin_pref, i, len(labs))) cur_score = cluster.Evaluate.information_gain(true_labs, labs) logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score)) scores.append(cur_score) if ((adapt_function == 'silhouette') and 
(len(pop_t) > 1)): scores = np.array(scores) uniq_scores = set(scores) if (len(uniq_scores) > 2): scores[(scores == (- 1))] = sorted(set(uniq_scores))[1] scores = ((scores + 1) / 2) logging.debug('{}getting adapt scores... DONE'.format(loggin_pref)) return scores
def get_adapt_scores(pop_t, data, true_labs, adapt_function='silhouette', dist_measure='euclidean', loggin_pref=): '\n Cluster data for eacn individual (centroids set) in population and get scores.\n\n pop_t : population to score\n true_labs : for data provided\n adapt_function : {"silhouette"|"info_gain"}; these values to be returned\n dist_measure : {"euclidean"|"manhattan"|"cosine"} used for clustering and silhouette score\n\n return : scores for each individual; 1-D array of length n (pop_t size)\n ' logging.debug('{}getting adapt scores...'.format(loggin_pref)) true_num = len(np.unique(true_labs)) scores = [] for (i, individual) in enumerate(pop_t): logging.debug('{}indiv {}: Clustering...'.format(loggin_pref, i)) labs = cluster.Centroids.cluster(data, individual, dist_func=dist_measure) uniq_num = len(np.unique(labs)) logging.debug('{}indiv {}: labs unique num: {}'.format(loggin_pref, i, uniq_num)) if (adapt_function == 'silhouette'): if (uniq_num == 1): cur_score = (- 1) elif (uniq_num < true_num): logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i)) cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure) corrected_score = (cur_score - ((true_num - uniq_num) * 0.1)) cur_score = max(corrected_score, (- 1)) else: logging.debug('{}indiv {}: computing silhouette...'.format(loggin_pref, i)) cur_score = cluster.Evaluate.silhouette(data, labs, dist_func=dist_measure) logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score)) scores.append(cur_score) elif (adapt_function == 'info_gain'): logging.debug('{}indiv {}: computing info_gain...'.format(loggin_pref, i)) labs = cluster.Utils.adjust_labels(labs, true_labs) logging.debug('{}indiv {}: adjust_labels length: {}'.format(loggin_pref, i, len(labs))) cur_score = cluster.Evaluate.information_gain(true_labs, labs) logging.debug('{}indiv {}: cur_score: {}'.format(loggin_pref, i, cur_score)) scores.append(cur_score) if ((adapt_function == 'silhouette') and 
(len(pop_t) > 1)): scores = np.array(scores) uniq_scores = set(scores) if (len(uniq_scores) > 2): scores[(scores == (- 1))] = sorted(set(uniq_scores))[1] scores = ((scores + 1) / 2) logging.debug('{}getting adapt scores... DONE'.format(loggin_pref)) return scores<|docstring|>Cluster data for eacn individual (centroids set) in population and get scores. pop_t : population to score true_labs : for data provided adapt_function : {"silhouette"|"info_gain"}; these values to be returned dist_measure : {"euclidean"|"manhattan"|"cosine"} used for clustering and silhouette score return : scores for each individual; 1-D array of length n (pop_t size)<|endoftext|>
9b286708f79bc546112bd33141f575a2e8d5a495d76ac18d3cd9305ac0d06f6f
def reproduction(pop_t, adapt_scores, loggin_pref=''):
    """
    Roulette-wheel reproduction: copy individuals from P_t into a new
    temporary population T_t, where the probability of being copied is
    proportional to the individual's (min-shifted) adaptation score.

    pop_t : current population, indexable numpy array of n individuals
    adapt_scores : 1-D adaptation scores, one per individual
    loggin_pref : prefix prepended to log messages

    return : new temporary population T_t (same shape as pop_t)
    """
    logging.debug('{}reproducing...'.format(loggin_pref))
    adapt_scores = np.array(adapt_scores, dtype=float)
    logging.debug('{}adapt_scores len: {}'.format(loggin_pref, len(adapt_scores)))
    # shift so the worst individual has weight 0; the original code's
    # double normalisation reduces algebraically to shifted / shifted.sum()
    shifted = adapt_scores - adapt_scores.min()
    total = shifted.sum()
    if total > 0:
        prob_repr = np.cumsum(shifted / total)
    else:
        # all scores equal: fall back to uniform selection instead of
        # dividing by zero (which produced NaN probabilities and silently
        # collapsed the whole population onto individual 0)
        prob_repr = np.cumsum(np.full(len(adapt_scores), 1.0 / len(adapt_scores)))
    prob_repr[-1] = 1.0  # guard against float round-off leaving the tail < 1
    logging.debug('{}cumulative prob_repr len: {}'.format(loggin_pref, len(prob_repr)))
    n = len(pop_t)
    # spin the wheel n times: first cumulative bucket exceeding the draw wins
    new_indices = [np.argmax(np.random.random() < prob_repr) for _ in range(n)]
    logging.debug('{}new_indices: {}'.format(loggin_pref, new_indices))
    logging.debug('{}reproducing... DONE'.format(loggin_pref))
    return pop_t[new_indices]
Randomly copy individuals from P_t to T_t, but based on adapt_scores: the higher the score the greater propability to be copied (reproduced) return : new temporary population T_t
evolclust/evolution.py
reproduction
maks-ym/clustering-wtih-ewolution-strategy
0
python
def reproduction(pop_t, adapt_scores, loggin_pref=): '\n Randomly copy individuals from P_t to T_t, but based on adapt_scores:\n the higher the score the greater propability to be copied (reproduced)\n\n return : new temporary population T_t\n ' logging.debug('{}reproducing...'.format(loggin_pref)) adapt_scores = np.array(adapt_scores) logging.debug('{}adapt_scores len: {}'.format(loggin_pref, len(adapt_scores))) prob_repr = ((adapt_scores - adapt_scores.min()) / (np.sum(adapt_scores) - adapt_scores.min())) prob_repr = np.cumsum((prob_repr / sum(prob_repr))) logging.debug('{}cumulative prob_repr len: {}'.format(loggin_pref, len(prob_repr))) n = len(pop_t) new_indices = [np.argmax((np.random.random() < prob_repr)) for i in range(n)] logging.debug('{}new_indices: {}'.format(loggin_pref, new_indices)) logging.debug('{}reproducing... DONE'.format(loggin_pref)) return pop_t[new_indices]
def reproduction(pop_t, adapt_scores, loggin_pref=): '\n Randomly copy individuals from P_t to T_t, but based on adapt_scores:\n the higher the score the greater propability to be copied (reproduced)\n\n return : new temporary population T_t\n ' logging.debug('{}reproducing...'.format(loggin_pref)) adapt_scores = np.array(adapt_scores) logging.debug('{}adapt_scores len: {}'.format(loggin_pref, len(adapt_scores))) prob_repr = ((adapt_scores - adapt_scores.min()) / (np.sum(adapt_scores) - adapt_scores.min())) prob_repr = np.cumsum((prob_repr / sum(prob_repr))) logging.debug('{}cumulative prob_repr len: {}'.format(loggin_pref, len(prob_repr))) n = len(pop_t) new_indices = [np.argmax((np.random.random() < prob_repr)) for i in range(n)] logging.debug('{}new_indices: {}'.format(loggin_pref, new_indices)) logging.debug('{}reproducing... DONE'.format(loggin_pref)) return pop_t[new_indices]<|docstring|>Randomly copy individuals from P_t to T_t, but based on adapt_scores: the higher the score the greater propability to be copied (reproduced) return : new temporary population T_t<|endoftext|>
383c2bb0b3a76bcc1f6f6c65812fd3fb2222e3ce04579f9ec4188d901cc532fb
def crossover(temp_pop_t, prob_cross=0.7, loggin_pref=''):
    """
    Simple one-point crossover on the temporary population T_t.

    Individuals are paired up in order; each pair is crossed with
    probability ``prob_cross`` at a uniformly random cut point along the
    gene (feature) axis.

    temp_pop_t : population of shape (n, centers_num, n_features)
    prob_cross : probability that a given pair is crossed over
    loggin_pref : prefix prepended to log messages

    return : modified temporary population T_t (float array, same shape)
    """
    logging.debug('{}crossover...'.format(loggin_pref))
    # start from a copy so pairs that are NOT crossed (and the trailing
    # unpaired individual when n is odd) pass through unchanged — the
    # original np.zeros initialisation silently zeroed them out
    mod_pop = np.array(temp_pop_t, dtype=float, copy=True)
    n_pairs = len(temp_pop_t) // 2
    cut_bools = np.random.rand(n_pairs) < prob_cross
    # cut point in [1, n_features - 1] so both sides are non-empty
    cut_places = np.random.randint(1, len(temp_pop_t[0][0]), size=n_pairs)
    for (pair_i, cut_bool, cut_i) in zip(range(n_pairs), cut_bools, cut_places):
        if cut_bool:
            parent_1 = temp_pop_t[2 * pair_i]
            parent_2 = temp_pop_t[(2 * pair_i) + 1]
            # swap the gene tails of the two parents at the cut column
            # (fixed invalid `parent_1[(:, :cut_i)]` subscript syntax)
            mod_pop[2 * pair_i] = np.hstack((parent_1[:, :cut_i], parent_2[:, cut_i:]))
            mod_pop[(2 * pair_i) + 1] = np.hstack((parent_2[:, :cut_i], parent_1[:, cut_i:]))
    logging.debug('{}crossover... DONE'.format(loggin_pref))
    return mod_pop
Simple one point crossover of chromosomes (in temporary population T_t). Steps: - split population into pairs - crossover with probability prob_cross, choose randomly the place to split with uniform distribution return : modified temporary population T_t
evolclust/evolution.py
crossover
maks-ym/clustering-wtih-ewolution-strategy
0
python
def crossover(temp_pop_t, prob_cross=0.7, loggin_pref=): '\n Simple one point crossover of chromosomes (in temporary population T_t).\n\n Steps:\n - split population into pairs\n - crossover with probability prob_cross, choose randomly the place to split with uniform distribution\n\n return : modified temporary population T_t\n ' logging.debug('{}crossover...'.format(loggin_pref)) mod_pop = np.zeros(temp_pop_t.shape) n_pairs = (len(temp_pop_t) // 2) cut_bools = (np.random.rand(n_pairs) < prob_cross) cut_places = np.random.randint(1, len(temp_pop_t[0][0]), size=n_pairs) pairs = [i for i in range(n_pairs)] for (pair_i, cut_bool, cut_i) in zip(pairs, cut_bools, cut_places): if cut_bool: parent_1 = temp_pop_t[(2 * pair_i)] parent_2 = temp_pop_t[((2 * pair_i) + 1)] mod_pop[(2 * pair_i)] = np.hstack((parent_1[(:, :cut_i)], parent_2[(:, cut_i:)])) mod_pop[((2 * pair_i) + 1)] = np.hstack((parent_2[(:, :cut_i)], parent_1[(:, cut_i:)])) logging.debug('{}crossover... DONE'.format(loggin_pref)) return mod_pop
def crossover(temp_pop_t, prob_cross=0.7, loggin_pref=): '\n Simple one point crossover of chromosomes (in temporary population T_t).\n\n Steps:\n - split population into pairs\n - crossover with probability prob_cross, choose randomly the place to split with uniform distribution\n\n return : modified temporary population T_t\n ' logging.debug('{}crossover...'.format(loggin_pref)) mod_pop = np.zeros(temp_pop_t.shape) n_pairs = (len(temp_pop_t) // 2) cut_bools = (np.random.rand(n_pairs) < prob_cross) cut_places = np.random.randint(1, len(temp_pop_t[0][0]), size=n_pairs) pairs = [i for i in range(n_pairs)] for (pair_i, cut_bool, cut_i) in zip(pairs, cut_bools, cut_places): if cut_bool: parent_1 = temp_pop_t[(2 * pair_i)] parent_2 = temp_pop_t[((2 * pair_i) + 1)] mod_pop[(2 * pair_i)] = np.hstack((parent_1[(:, :cut_i)], parent_2[(:, cut_i:)])) mod_pop[((2 * pair_i) + 1)] = np.hstack((parent_2[(:, :cut_i)], parent_1[(:, cut_i:)])) logging.debug('{}crossover... DONE'.format(loggin_pref)) return mod_pop<|docstring|>Simple one point crossover of chromosomes (in temporary population T_t). Steps: - split population into pairs - crossover with probability prob_cross, choose randomly the place to split with uniform distribution return : modified temporary population T_t<|endoftext|>
e76beb1a3dd97656c79a8e5f0c1ade8f792be175f818f90d228fab1a4e13b6c9
def mutation(pop_o_t, prob_mutation=0.1, min=(- 1), max=1, loggin_pref=''):
    """
    Mutate each gene independently with probability ``prob_mutation``.

    A mutated gene receives a new value drawn uniformly from the
    corresponding [min, max) range; ``min``/``max`` may be scalars or
    per-feature arrays (e.g. the bounds returned by init_population).

    NOTE: the parameter names ``min``/``max`` shadow the builtins; they
    are kept unchanged for backward compatibility with existing callers.

    pop_o_t : population of shape (n, centers_num, n_features); mutated in place
    prob_mutation : per-gene mutation probability
    min, max : lower / upper bounds for new gene values
    loggin_pref : prefix prepended to log messages

    return : the mutated child population O_t (same object as pop_o_t)
    """
    logging.debug('{}mutation...'.format(loggin_pref))
    for (i_i, ind) in enumerate(pop_o_t):
        for (c_i, centroid) in enumerate(ind):
            # per-gene Bernoulli mask deciding which genes mutate
            mutate_bools = np.random.rand(len(centroid)) < prob_mutation
            # candidate replacement values, uniform in [min, max)
            mutate_vals = (max - min) * np.random.random(len(centroid)) + min
            centroid[mutate_bools] = mutate_vals[mutate_bools]
            # write the centroid back (fixed invalid `pop_o_t[(i_i, c_i, :)]`
            # subscript syntax — a slice is not allowed inside a parenthesized tuple)
            pop_o_t[i_i, c_i, :] = centroid[:]
            logging.debug('{}indiv {}; centroid {}; mutated genes {}/{}'.format(loggin_pref, i_i, c_i, np.sum(mutate_bools), len(centroid)))
    logging.debug('{}mutation... DONE'.format(loggin_pref))
    return pop_o_t
Mutation of each gene with probability prob_mutation. If mutate, choose new gene value from corresponding range between min and max return : new child population O_t
evolclust/evolution.py
mutation
maks-ym/clustering-wtih-ewolution-strategy
0
python
def mutation(pop_o_t, prob_mutation=0.1, min=(- 1), max=1, loggin_pref=): '\n Mutation of each gene with probability prob_mutation.\n If mutate, choose new gene value from corresponding range between min and max\n\n return : new child population O_t\n ' logging.debug('{}mutation...'.format(loggin_pref)) for (i_i, ind) in enumerate(pop_o_t): for (c_i, centroid) in enumerate(ind): mutate_bools = (np.random.rand(len(centroid)) < prob_mutation) mutate_vals = (((max - min) * np.random.random(len(centroid))) + min) centroid[mutate_bools] = mutate_vals[mutate_bools] pop_o_t[(i_i, c_i, :)] = centroid[:] logging.debug('{}indiv {}; centroid {}; mutated genes {}/{}'.format(loggin_pref, i_i, c_i, np.sum(mutate_bools), len(centroid))) logging.debug('{}mutation... DONE'.format(loggin_pref)) return pop_o_t
def mutation(pop_o_t, prob_mutation=0.1, min=(- 1), max=1, loggin_pref=): '\n Mutation of each gene with probability prob_mutation.\n If mutate, choose new gene value from corresponding range between min and max\n\n return : new child population O_t\n ' logging.debug('{}mutation...'.format(loggin_pref)) for (i_i, ind) in enumerate(pop_o_t): for (c_i, centroid) in enumerate(ind): mutate_bools = (np.random.rand(len(centroid)) < prob_mutation) mutate_vals = (((max - min) * np.random.random(len(centroid))) + min) centroid[mutate_bools] = mutate_vals[mutate_bools] pop_o_t[(i_i, c_i, :)] = centroid[:] logging.debug('{}indiv {}; centroid {}; mutated genes {}/{}'.format(loggin_pref, i_i, c_i, np.sum(mutate_bools), len(centroid))) logging.debug('{}mutation... DONE'.format(loggin_pref)) return pop_o_t<|docstring|>Mutation of each gene with probability prob_mutation. If mutate, choose new gene value from corresponding range between min and max return : new child population O_t<|endoftext|>
13b2c033ef243f35b84ff7377915834e7db7acf29bdf720073754ce28abb9a25
def run_SGA(iter_num, data, labs, pop_num, prob_cross, prob_mutation, centers_num, adapt_function, dist_measure, log_dir='logs', loggin_pref=''): '\n Run the whole Simple Genetic Algotithm.\n \n iter_num : number of generations to calculate\n data : data to carry on experiment\n labs : true labels for data\n pop_num : number of individuals in population\n prob_cross : crossover probability\n prob_mutation : mutation probability\n centers_num : number of cluster to create\n adapt_function : {"silhouette"|"info_gain"}; these values to be returned\n dist_measure : {"euclidian"|"manhattan"|"cosine"} used for clustering and silhouette score\n log_dir : directory for results output\n\n return : [tuple] (iterations [list], scores [list of lists], \n generations [list of individuals], total_time [in seconds]), log_dir,\n list of lists max,min,avg,median scores, tuple with indices of the best individual \n ' logging.info('{}Simple Genetic Algotithm Run'.format(loggin_pref)) logging.info('{}============================'.format(loggin_pref)) timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') start_time = time.time() (iterations, scores, generations) = ([], [], []) (pop, maxima, minima) = init_population(data, centers_num, pop_num) pop_scores = get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure) iterations.append(0) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}generation:\tmax \tmin \tmean \tmedian'.format(loggin_pref)) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, 0, iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) for it in range(iter_num): log_pref = '{}gen {}/{}: '.format(loggin_pref, (it + 1), iter_num) pop = reproduction(pop, pop_scores, loggin_pref=log_pref) pop = crossover(pop, prob_cross, loggin_pref=log_pref) pop = mutation(pop, prob_mutation, min=minima, max=maxima, loggin_pref=log_pref) pop_scores = 
get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure, loggin_pref=log_pref) iterations.append((it + 1)) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, (it + 1), iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) iterations = np.array(iterations) scores = np.array(scores) generations = np.array(generations) total_time = (time.time() - start_time) logging.debug('{}SGA RUN - DONE'.format(loggin_pref)) logging.info('{}writing log...'.format(loggin_pref)) log_dir = os.path.join(log_dir, '_'.join([timestamp, ('pop' + str(pop_num)), ('pc' + str(prob_cross)), ('pm' + str(prob_mutation)), ('centrs' + str(centers_num)), ('iters' + str(iter_num)), adapt_function, dist_measure, ('ds' + str(len(labs)))])) if (not os.path.isdir(log_dir)): os.makedirs(log_dir) with open(os.path.join(log_dir, (timestamp + '.txt')), 'w') as out_f: out_f.write('params:\n') out_f.write('iter_num:\t{}\n'.format(iter_num)) out_f.write('data shape:\t{}\n'.format(data.shape)) out_f.write('labs shape:\t{}\n'.format(labs.shape)) out_f.write('pop_num:\t{}\n'.format(pop_num)) out_f.write('prob_cross:\t{}\n'.format(prob_cross)) out_f.write('prob_mutation:\t{}\n'.format(prob_mutation)) out_f.write('centers_num:\t{}\n'.format(centers_num)) out_f.write('adapt_function:\t{}\n'.format(adapt_function)) out_f.write('dist_measure:\t{}\n'.format(dist_measure)) out_f.write('log_dir:\t{}\n'.format(log_dir)) out_f.write('-----------------------------------------------\n') out_f.write('results\n') out_f.write('best score:\t{}\n'.format(scores.max())) best_generation_ind = (scores.argmax() // scores.shape[1]) best_ind_inbest_gen = (scores.argmax() - (best_generation_ind * scores.shape[1])) out_f.write('best score (index):\tgeneration {}, individual {}\n'.format(best_generation_ind, best_ind_inbest_gen)) 
out_f.write('total_time:\t{}min{}s\n'.format((total_time // 60), str((total_time % 60))[:6])) logging.info('{}writing log... DONE. time: {}'.format(loggin_pref, total_time)) logging.info('{}saving experiment output...'.format(loggin_pref)) with open(os.path.join(log_dir, 'iterations.npy'), 'wb') as iters_f: np.save(iters_f, iterations) with open(os.path.join(log_dir, 'generations.npy'), 'wb') as gens_f: np.save(gens_f, generations) with open(os.path.join(log_dir, 'scores.npy'), 'wb') as scores_f: np.save(scores_f, scores) logging.info('{}saving experiment output... DONE'.format(loggin_pref)) return (iterations, scores, generations, total_time, log_dir, (best_generation_ind, best_ind_inbest_gen))
Run the whole Simple Genetic Algotithm. iter_num : number of generations to calculate data : data to carry on experiment labs : true labels for data pop_num : number of individuals in population prob_cross : crossover probability prob_mutation : mutation probability centers_num : number of cluster to create adapt_function : {"silhouette"|"info_gain"}; these values to be returned dist_measure : {"euclidian"|"manhattan"|"cosine"} used for clustering and silhouette score log_dir : directory for results output return : [tuple] (iterations [list], scores [list of lists], generations [list of individuals], total_time [in seconds]), log_dir, list of lists max,min,avg,median scores, tuple with indices of the best individual
evolclust/evolution.py
run_SGA
maks-ym/clustering-wtih-ewolution-strategy
0
python
def run_SGA(iter_num, data, labs, pop_num, prob_cross, prob_mutation, centers_num, adapt_function, dist_measure, log_dir='logs', loggin_pref=): '\n Run the whole Simple Genetic Algotithm.\n \n iter_num : number of generations to calculate\n data : data to carry on experiment\n labs : true labels for data\n pop_num : number of individuals in population\n prob_cross : crossover probability\n prob_mutation : mutation probability\n centers_num : number of cluster to create\n adapt_function : {"silhouette"|"info_gain"}; these values to be returned\n dist_measure : {"euclidian"|"manhattan"|"cosine"} used for clustering and silhouette score\n log_dir : directory for results output\n\n return : [tuple] (iterations [list], scores [list of lists], \n generations [list of individuals], total_time [in seconds]), log_dir,\n list of lists max,min,avg,median scores, tuple with indices of the best individual \n ' logging.info('{}Simple Genetic Algotithm Run'.format(loggin_pref)) logging.info('{}============================'.format(loggin_pref)) timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') start_time = time.time() (iterations, scores, generations) = ([], [], []) (pop, maxima, minima) = init_population(data, centers_num, pop_num) pop_scores = get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure) iterations.append(0) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}generation:\tmax \tmin \tmean \tmedian'.format(loggin_pref)) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, 0, iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) for it in range(iter_num): log_pref = '{}gen {}/{}: '.format(loggin_pref, (it + 1), iter_num) pop = reproduction(pop, pop_scores, loggin_pref=log_pref) pop = crossover(pop, prob_cross, loggin_pref=log_pref) pop = mutation(pop, prob_mutation, min=minima, max=maxima, loggin_pref=log_pref) pop_scores = 
get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure, loggin_pref=log_pref) iterations.append((it + 1)) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, (it + 1), iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) iterations = np.array(iterations) scores = np.array(scores) generations = np.array(generations) total_time = (time.time() - start_time) logging.debug('{}SGA RUN - DONE'.format(loggin_pref)) logging.info('{}writing log...'.format(loggin_pref)) log_dir = os.path.join(log_dir, '_'.join([timestamp, ('pop' + str(pop_num)), ('pc' + str(prob_cross)), ('pm' + str(prob_mutation)), ('centrs' + str(centers_num)), ('iters' + str(iter_num)), adapt_function, dist_measure, ('ds' + str(len(labs)))])) if (not os.path.isdir(log_dir)): os.makedirs(log_dir) with open(os.path.join(log_dir, (timestamp + '.txt')), 'w') as out_f: out_f.write('params:\n') out_f.write('iter_num:\t{}\n'.format(iter_num)) out_f.write('data shape:\t{}\n'.format(data.shape)) out_f.write('labs shape:\t{}\n'.format(labs.shape)) out_f.write('pop_num:\t{}\n'.format(pop_num)) out_f.write('prob_cross:\t{}\n'.format(prob_cross)) out_f.write('prob_mutation:\t{}\n'.format(prob_mutation)) out_f.write('centers_num:\t{}\n'.format(centers_num)) out_f.write('adapt_function:\t{}\n'.format(adapt_function)) out_f.write('dist_measure:\t{}\n'.format(dist_measure)) out_f.write('log_dir:\t{}\n'.format(log_dir)) out_f.write('-----------------------------------------------\n') out_f.write('results\n') out_f.write('best score:\t{}\n'.format(scores.max())) best_generation_ind = (scores.argmax() // scores.shape[1]) best_ind_inbest_gen = (scores.argmax() - (best_generation_ind * scores.shape[1])) out_f.write('best score (index):\tgeneration {}, individual {}\n'.format(best_generation_ind, best_ind_inbest_gen)) 
out_f.write('total_time:\t{}min{}s\n'.format((total_time // 60), str((total_time % 60))[:6])) logging.info('{}writing log... DONE. time: {}'.format(loggin_pref, total_time)) logging.info('{}saving experiment output...'.format(loggin_pref)) with open(os.path.join(log_dir, 'iterations.npy'), 'wb') as iters_f: np.save(iters_f, iterations) with open(os.path.join(log_dir, 'generations.npy'), 'wb') as gens_f: np.save(gens_f, generations) with open(os.path.join(log_dir, 'scores.npy'), 'wb') as scores_f: np.save(scores_f, scores) logging.info('{}saving experiment output... DONE'.format(loggin_pref)) return (iterations, scores, generations, total_time, log_dir, (best_generation_ind, best_ind_inbest_gen))
def run_SGA(iter_num, data, labs, pop_num, prob_cross, prob_mutation, centers_num, adapt_function, dist_measure, log_dir='logs', loggin_pref=): '\n Run the whole Simple Genetic Algotithm.\n \n iter_num : number of generations to calculate\n data : data to carry on experiment\n labs : true labels for data\n pop_num : number of individuals in population\n prob_cross : crossover probability\n prob_mutation : mutation probability\n centers_num : number of cluster to create\n adapt_function : {"silhouette"|"info_gain"}; these values to be returned\n dist_measure : {"euclidian"|"manhattan"|"cosine"} used for clustering and silhouette score\n log_dir : directory for results output\n\n return : [tuple] (iterations [list], scores [list of lists], \n generations [list of individuals], total_time [in seconds]), log_dir,\n list of lists max,min,avg,median scores, tuple with indices of the best individual \n ' logging.info('{}Simple Genetic Algotithm Run'.format(loggin_pref)) logging.info('{}============================'.format(loggin_pref)) timestamp = datetime.now().strftime('%Y%m%d_%H%M%S') start_time = time.time() (iterations, scores, generations) = ([], [], []) (pop, maxima, minima) = init_population(data, centers_num, pop_num) pop_scores = get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure) iterations.append(0) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}generation:\tmax \tmin \tmean \tmedian'.format(loggin_pref)) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, 0, iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) for it in range(iter_num): log_pref = '{}gen {}/{}: '.format(loggin_pref, (it + 1), iter_num) pop = reproduction(pop, pop_scores, loggin_pref=log_pref) pop = crossover(pop, prob_cross, loggin_pref=log_pref) pop = mutation(pop, prob_mutation, min=minima, max=maxima, loggin_pref=log_pref) pop_scores = 
get_adapt_scores(pop, data, labs, adapt_function=adapt_function, dist_measure=dist_measure, loggin_pref=log_pref) iterations.append((it + 1)) scores.append(pop_scores) generations.append(pop) pop_scores = np.array(pop_scores) logging.info('{}gen {}/{}:\t{:.3}\t{:.3}\t{:.3}\t{:.3}'.format(loggin_pref, (it + 1), iter_num, pop_scores.max(), pop_scores.min(), pop_scores.mean(), np.median(pop_scores))) iterations = np.array(iterations) scores = np.array(scores) generations = np.array(generations) total_time = (time.time() - start_time) logging.debug('{}SGA RUN - DONE'.format(loggin_pref)) logging.info('{}writing log...'.format(loggin_pref)) log_dir = os.path.join(log_dir, '_'.join([timestamp, ('pop' + str(pop_num)), ('pc' + str(prob_cross)), ('pm' + str(prob_mutation)), ('centrs' + str(centers_num)), ('iters' + str(iter_num)), adapt_function, dist_measure, ('ds' + str(len(labs)))])) if (not os.path.isdir(log_dir)): os.makedirs(log_dir) with open(os.path.join(log_dir, (timestamp + '.txt')), 'w') as out_f: out_f.write('params:\n') out_f.write('iter_num:\t{}\n'.format(iter_num)) out_f.write('data shape:\t{}\n'.format(data.shape)) out_f.write('labs shape:\t{}\n'.format(labs.shape)) out_f.write('pop_num:\t{}\n'.format(pop_num)) out_f.write('prob_cross:\t{}\n'.format(prob_cross)) out_f.write('prob_mutation:\t{}\n'.format(prob_mutation)) out_f.write('centers_num:\t{}\n'.format(centers_num)) out_f.write('adapt_function:\t{}\n'.format(adapt_function)) out_f.write('dist_measure:\t{}\n'.format(dist_measure)) out_f.write('log_dir:\t{}\n'.format(log_dir)) out_f.write('-----------------------------------------------\n') out_f.write('results\n') out_f.write('best score:\t{}\n'.format(scores.max())) best_generation_ind = (scores.argmax() // scores.shape[1]) best_ind_inbest_gen = (scores.argmax() - (best_generation_ind * scores.shape[1])) out_f.write('best score (index):\tgeneration {}, individual {}\n'.format(best_generation_ind, best_ind_inbest_gen)) 
out_f.write('total_time:\t{}min{}s\n'.format((total_time // 60), str((total_time % 60))[:6])) logging.info('{}writing log... DONE. time: {}'.format(loggin_pref, total_time)) logging.info('{}saving experiment output...'.format(loggin_pref)) with open(os.path.join(log_dir, 'iterations.npy'), 'wb') as iters_f: np.save(iters_f, iterations) with open(os.path.join(log_dir, 'generations.npy'), 'wb') as gens_f: np.save(gens_f, generations) with open(os.path.join(log_dir, 'scores.npy'), 'wb') as scores_f: np.save(scores_f, scores) logging.info('{}saving experiment output... DONE'.format(loggin_pref)) return (iterations, scores, generations, total_time, log_dir, (best_generation_ind, best_ind_inbest_gen))<|docstring|>Run the whole Simple Genetic Algotithm. iter_num : number of generations to calculate data : data to carry on experiment labs : true labels for data pop_num : number of individuals in population prob_cross : crossover probability prob_mutation : mutation probability centers_num : number of cluster to create adapt_function : {"silhouette"|"info_gain"}; these values to be returned dist_measure : {"euclidian"|"manhattan"|"cosine"} used for clustering and silhouette score log_dir : directory for results output return : [tuple] (iterations [list], scores [list of lists], generations [list of individuals], total_time [in seconds]), log_dir, list of lists max,min,avg,median scores, tuple with indices of the best individual<|endoftext|>
fdc95c52cbec3a87f642f1ffebbf4f553c7ab12fe1a653cf9eff14beef9208d1
def __init__(self, workload_mat: WorkloadMatrix, load: WorkloadSpace, cost_per_buffer: StateSpace, model_type: str, strategic_idling_params: Optional[StrategicIdlingParams]=None, debug_info: bool=False) -> None: "\n :param workload_mat: workload matrix, with rows being workload vectors.\n :param load: vector with loads for every workload vector.\n :param cost_per_buffer: cost per unit of inventory per buffer.\n :param model_type: String indicating if this is a `'pull'` or `'push'` model.\n :param strategic_idling_params: tolerance levels and convex solver choice for navigating\n effective cost space.\n :param debug_info: Boolean flag that indicates whether printing useful debug info.\n " self._workload_mat = workload_mat self._load = load self._cost_per_buffer = cost_per_buffer assert (model_type in ['push', 'pull']) self.model_type = model_type self.check_strategic_idling_parameters(strategic_idling_params) self.strategic_idling_params = strategic_idling_params self.debug_info = debug_info (self._num_bottlenecks, self._num_buffers) = workload_mat.shape convex_solver = strategic_idling_params.convex_solver self.c_bar_solver = ComputeDualEffectiveCost(workload_mat, cost_per_buffer, convex_solver) (self._w_star_lp_problem, self._x_star, self._w_param) = self._create_find_workload_with_min_eff_cost_by_idling_lp_program()
:param workload_mat: workload matrix, with rows being workload vectors. :param load: vector with loads for every workload vector. :param cost_per_buffer: cost per unit of inventory per buffer. :param model_type: String indicating if this is a `'pull'` or `'push'` model. :param strategic_idling_params: tolerance levels and convex solver choice for navigating effective cost space. :param debug_info: Boolean flag that indicates whether printing useful debug info.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
__init__
dmcnamee/snc
5
python
def __init__(self, workload_mat: WorkloadMatrix, load: WorkloadSpace, cost_per_buffer: StateSpace, model_type: str, strategic_idling_params: Optional[StrategicIdlingParams]=None, debug_info: bool=False) -> None: "\n :param workload_mat: workload matrix, with rows being workload vectors.\n :param load: vector with loads for every workload vector.\n :param cost_per_buffer: cost per unit of inventory per buffer.\n :param model_type: String indicating if this is a `'pull'` or `'push'` model.\n :param strategic_idling_params: tolerance levels and convex solver choice for navigating\n effective cost space.\n :param debug_info: Boolean flag that indicates whether printing useful debug info.\n " self._workload_mat = workload_mat self._load = load self._cost_per_buffer = cost_per_buffer assert (model_type in ['push', 'pull']) self.model_type = model_type self.check_strategic_idling_parameters(strategic_idling_params) self.strategic_idling_params = strategic_idling_params self.debug_info = debug_info (self._num_bottlenecks, self._num_buffers) = workload_mat.shape convex_solver = strategic_idling_params.convex_solver self.c_bar_solver = ComputeDualEffectiveCost(workload_mat, cost_per_buffer, convex_solver) (self._w_star_lp_problem, self._x_star, self._w_param) = self._create_find_workload_with_min_eff_cost_by_idling_lp_program()
def __init__(self, workload_mat: WorkloadMatrix, load: WorkloadSpace, cost_per_buffer: StateSpace, model_type: str, strategic_idling_params: Optional[StrategicIdlingParams]=None, debug_info: bool=False) -> None: "\n :param workload_mat: workload matrix, with rows being workload vectors.\n :param load: vector with loads for every workload vector.\n :param cost_per_buffer: cost per unit of inventory per buffer.\n :param model_type: String indicating if this is a `'pull'` or `'push'` model.\n :param strategic_idling_params: tolerance levels and convex solver choice for navigating\n effective cost space.\n :param debug_info: Boolean flag that indicates whether printing useful debug info.\n " self._workload_mat = workload_mat self._load = load self._cost_per_buffer = cost_per_buffer assert (model_type in ['push', 'pull']) self.model_type = model_type self.check_strategic_idling_parameters(strategic_idling_params) self.strategic_idling_params = strategic_idling_params self.debug_info = debug_info (self._num_bottlenecks, self._num_buffers) = workload_mat.shape convex_solver = strategic_idling_params.convex_solver self.c_bar_solver = ComputeDualEffectiveCost(workload_mat, cost_per_buffer, convex_solver) (self._w_star_lp_problem, self._x_star, self._w_param) = self._create_find_workload_with_min_eff_cost_by_idling_lp_program()<|docstring|>:param workload_mat: workload matrix, with rows being workload vectors. :param load: vector with loads for every workload vector. :param cost_per_buffer: cost per unit of inventory per buffer. :param model_type: String indicating if this is a `'pull'` or `'push'` model. :param strategic_idling_params: tolerance levels and convex solver choice for navigating effective cost space. :param debug_info: Boolean flag that indicates whether printing useful debug info.<|endoftext|>
8cf195c75817ff6b9f96743309158d99b20befbcb38c32499ee29c548d3b498a
def _get_null_strategic_idling_output(self, **overrides) -> StrategicIdlingOutput: '\n There is no need to compute the idling directions when we are in the monotone region, so we\n return null values.\n\n :return: (beta_star, k_idling_set, sigma_2_h, psi_plus, ...)\n - w: current state in workload space, i.e. w = Xi x.\n - beta_star: hedging threshold.\n - k_idling_set: set of possibly idling directions.\n - sigma_2_h: asymptotic variance of the 1-dim "height" process.\n - psi_plus: vector normal to the closest face.\n - height_process: current state of height process variable\n - w_star: Projection of workload on to the nearest cone face\n - c_plus: dual cost vector in the monotone region\n - c_bar: dual cost vector at the current workload\n - psi_plus_cone_list: list of encountered psi_plus vectors\n - beta_star_cone_list: list of encountered beta_star values\n - delta_h: drift of the 1-dim "height" process\n - lambda_star: the price of random oscillations along the closest face\n - theta_roots: root of quadratic equation used for computing hedging\n ' assert ('w' in overrides), 'Current workload variable is not being returned' w = overrides['w'] beta_star = overrides.get('beta_star', 0) k_idling_set = overrides.get('k_idling_set', np.array([])) sigma_2_h = overrides.get('sigma_2_h', 0) psi_plus = overrides.get('psi_plus', None) height_process = overrides.get('height_process', 0.0) w_star = overrides.get('w_star', None) c_plus = overrides.get('c_plus', None) c_bar = overrides.get('c_bar', None) psi_plus_cone_list = overrides.get('psi_plus_cone_list', getattr(self, 'psi_plus_cone_list', [])) beta_star_cone_list = overrides.get('beta_star_cone_list', getattr(self, 'beta_star_cone_list', [])) delta_h = overrides.get('delta_h', 0) lambda_star = overrides.get('lambda_star', 0) theta_roots = overrides.get('theta_roots', None) return StrategicIdlingOutput(w, beta_star, k_idling_set, sigma_2_h, psi_plus, height_process, w_star, c_plus, c_bar, psi_plus_cone_list, 
beta_star_cone_list, delta_h, lambda_star, theta_roots)
There is no need to compute the idling directions when we are in the monotone region, so we return null values. :return: (beta_star, k_idling_set, sigma_2_h, psi_plus, ...) - w: current state in workload space, i.e. w = Xi x. - beta_star: hedging threshold. - k_idling_set: set of possibly idling directions. - sigma_2_h: asymptotic variance of the 1-dim "height" process. - psi_plus: vector normal to the closest face. - height_process: current state of height process variable - w_star: Projection of workload on to the nearest cone face - c_plus: dual cost vector in the monotone region - c_bar: dual cost vector at the current workload - psi_plus_cone_list: list of encountered psi_plus vectors - beta_star_cone_list: list of encountered beta_star values - delta_h: drift of the 1-dim "height" process - lambda_star: the price of random oscillations along the closest face - theta_roots: root of quadratic equation used for computing hedging
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_get_null_strategic_idling_output
dmcnamee/snc
5
python
def _get_null_strategic_idling_output(self, **overrides) -> StrategicIdlingOutput: '\n There is no need to compute the idling directions when we are in the monotone region, so we\n return null values.\n\n :return: (beta_star, k_idling_set, sigma_2_h, psi_plus, ...)\n - w: current state in workload space, i.e. w = Xi x.\n - beta_star: hedging threshold.\n - k_idling_set: set of possibly idling directions.\n - sigma_2_h: asymptotic variance of the 1-dim "height" process.\n - psi_plus: vector normal to the closest face.\n - height_process: current state of height process variable\n - w_star: Projection of workload on to the nearest cone face\n - c_plus: dual cost vector in the monotone region\n - c_bar: dual cost vector at the current workload\n - psi_plus_cone_list: list of encountered psi_plus vectors\n - beta_star_cone_list: list of encountered beta_star values\n - delta_h: drift of the 1-dim "height" process\n - lambda_star: the price of random oscillations along the closest face\n - theta_roots: root of quadratic equation used for computing hedging\n ' assert ('w' in overrides), 'Current workload variable is not being returned' w = overrides['w'] beta_star = overrides.get('beta_star', 0) k_idling_set = overrides.get('k_idling_set', np.array([])) sigma_2_h = overrides.get('sigma_2_h', 0) psi_plus = overrides.get('psi_plus', None) height_process = overrides.get('height_process', 0.0) w_star = overrides.get('w_star', None) c_plus = overrides.get('c_plus', None) c_bar = overrides.get('c_bar', None) psi_plus_cone_list = overrides.get('psi_plus_cone_list', getattr(self, 'psi_plus_cone_list', [])) beta_star_cone_list = overrides.get('beta_star_cone_list', getattr(self, 'beta_star_cone_list', [])) delta_h = overrides.get('delta_h', 0) lambda_star = overrides.get('lambda_star', 0) theta_roots = overrides.get('theta_roots', None) return StrategicIdlingOutput(w, beta_star, k_idling_set, sigma_2_h, psi_plus, height_process, w_star, c_plus, c_bar, psi_plus_cone_list, 
beta_star_cone_list, delta_h, lambda_star, theta_roots)
def _get_null_strategic_idling_output(self, **overrides) -> StrategicIdlingOutput: '\n There is no need to compute the idling directions when we are in the monotone region, so we\n return null values.\n\n :return: (beta_star, k_idling_set, sigma_2_h, psi_plus, ...)\n - w: current state in workload space, i.e. w = Xi x.\n - beta_star: hedging threshold.\n - k_idling_set: set of possibly idling directions.\n - sigma_2_h: asymptotic variance of the 1-dim "height" process.\n - psi_plus: vector normal to the closest face.\n - height_process: current state of height process variable\n - w_star: Projection of workload on to the nearest cone face\n - c_plus: dual cost vector in the monotone region\n - c_bar: dual cost vector at the current workload\n - psi_plus_cone_list: list of encountered psi_plus vectors\n - beta_star_cone_list: list of encountered beta_star values\n - delta_h: drift of the 1-dim "height" process\n - lambda_star: the price of random oscillations along the closest face\n - theta_roots: root of quadratic equation used for computing hedging\n ' assert ('w' in overrides), 'Current workload variable is not being returned' w = overrides['w'] beta_star = overrides.get('beta_star', 0) k_idling_set = overrides.get('k_idling_set', np.array([])) sigma_2_h = overrides.get('sigma_2_h', 0) psi_plus = overrides.get('psi_plus', None) height_process = overrides.get('height_process', 0.0) w_star = overrides.get('w_star', None) c_plus = overrides.get('c_plus', None) c_bar = overrides.get('c_bar', None) psi_plus_cone_list = overrides.get('psi_plus_cone_list', getattr(self, 'psi_plus_cone_list', [])) beta_star_cone_list = overrides.get('beta_star_cone_list', getattr(self, 'beta_star_cone_list', [])) delta_h = overrides.get('delta_h', 0) lambda_star = overrides.get('lambda_star', 0) theta_roots = overrides.get('theta_roots', None) return StrategicIdlingOutput(w, beta_star, k_idling_set, sigma_2_h, psi_plus, height_process, w_star, c_plus, c_bar, psi_plus_cone_list, 
beta_star_cone_list, delta_h, lambda_star, theta_roots)<|docstring|>There is no need to compute the idling directions when we are in the monotone region, so we return null values. :return: (beta_star, k_idling_set, sigma_2_h, psi_plus, ...) - w: current state in workload space, i.e. w = Xi x. - beta_star: hedging threshold. - k_idling_set: set of possibly idling directions. - sigma_2_h: asymptotic variance of the 1-dim "height" process. - psi_plus: vector normal to the closest face. - height_process: current state of height process variable - w_star: Projection of workload on to the nearest cone face - c_plus: dual cost vector in the monotone region - c_bar: dual cost vector at the current workload - psi_plus_cone_list: list of encountered psi_plus vectors - beta_star_cone_list: list of encountered beta_star values - delta_h: drift of the 1-dim "height" process - lambda_star: the price of random oscillations along the closest face - theta_roots: root of quadratic equation used for computing hedging<|endoftext|>
061b604aa58f32b09504c5f1a35bbcfac7c6aaf494efd1f37eb38ee8cb15a9ca
@staticmethod def _is_decision_not_to_idle(k_idling_set: np.ndarray) -> bool: '\n Method determines whether all the bottlenecks have already been eliminated\n from possible idling action.\n\n :param k_idling_set: a set of bottlenecks that are permitted to idle.\n :return: bool\n ' return (k_idling_set.size == 0)
Method determines whether all the bottlenecks have already been eliminated from possible idling action. :param k_idling_set: a set of bottlenecks that are permitted to idle. :return: bool
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_decision_not_to_idle
dmcnamee/snc
5
python
@staticmethod def _is_decision_not_to_idle(k_idling_set: np.ndarray) -> bool: '\n Method determines whether all the bottlenecks have already been eliminated\n from possible idling action.\n\n :param k_idling_set: a set of bottlenecks that are permitted to idle.\n :return: bool\n ' return (k_idling_set.size == 0)
@staticmethod def _is_decision_not_to_idle(k_idling_set: np.ndarray) -> bool: '\n Method determines whether all the bottlenecks have already been eliminated\n from possible idling action.\n\n :param k_idling_set: a set of bottlenecks that are permitted to idle.\n :return: bool\n ' return (k_idling_set.size == 0)<|docstring|>Method determines whether all the bottlenecks have already been eliminated from possible idling action. :param k_idling_set: a set of bottlenecks that are permitted to idle. :return: bool<|endoftext|>
408557b41585b17350463ad59186c8ccb3564f1402aba81298b1219c95eeaa63
@staticmethod def _is_1d_workload_relaxation(w: WorkloadSpace) -> bool: '\n Check if the current workload relaxation occurs in one dimension.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: bool\n ' return (w.shape[0] == 1)
Check if the current workload relaxation occurs in one dimension. :param w: current state in workload space, i.e. w = Xi @ x. :return: bool
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_1d_workload_relaxation
dmcnamee/snc
5
python
@staticmethod def _is_1d_workload_relaxation(w: WorkloadSpace) -> bool: '\n Check if the current workload relaxation occurs in one dimension.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: bool\n ' return (w.shape[0] == 1)
@staticmethod def _is_1d_workload_relaxation(w: WorkloadSpace) -> bool: '\n Check if the current workload relaxation occurs in one dimension.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: bool\n ' return (w.shape[0] == 1)<|docstring|>Check if the current workload relaxation occurs in one dimension. :param w: current state in workload space, i.e. w = Xi @ x. :return: bool<|endoftext|>
b17c194b3cbd3f3402a6549d27f380517a33dd6ad9c39d9a8248c27d00b657e9
@staticmethod def _is_negative_orthant(w: WorkloadSpace, eps: float=1e-06) -> bool: '\n Check if current workload state, w, is in the negative quadrant, i.e.,\n all its components are nonpositive and at least one is strictly negative.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param eps: tolerance value to check that we are not in the negative quadrant.\n :return: bool\n ' return bool((np.all((w <= eps)) and np.any((w < (- eps)))))
Check if current workload state, w, is in the negative quadrant, i.e., all its components are nonpositive and at least one is strictly negative. :param w: current state in workload space, i.e. w = Xi @ x. :param eps: tolerance value to check that we are not in the negative quadrant. :return: bool
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_negative_orthant
dmcnamee/snc
5
python
@staticmethod def _is_negative_orthant(w: WorkloadSpace, eps: float=1e-06) -> bool: '\n Check if current workload state, w, is in the negative quadrant, i.e.,\n all its components are nonpositive and at least one is strictly negative.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param eps: tolerance value to check that we are not in the negative quadrant.\n :return: bool\n ' return bool((np.all((w <= eps)) and np.any((w < (- eps)))))
@staticmethod def _is_negative_orthant(w: WorkloadSpace, eps: float=1e-06) -> bool: '\n Check if current workload state, w, is in the negative quadrant, i.e.,\n all its components are nonpositive and at least one is strictly negative.\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param eps: tolerance value to check that we are not in the negative quadrant.\n :return: bool\n ' return bool((np.all((w <= eps)) and np.any((w < (- eps)))))<|docstring|>Check if current workload state, w, is in the negative quadrant, i.e., all its components are nonpositive and at least one is strictly negative. :param w: current state in workload space, i.e. w = Xi @ x. :param eps: tolerance value to check that we are not in the negative quadrant. :return: bool<|endoftext|>
bc38537366c7a5290f87af2bdad244e12a9754d1bd8f86af0fa1b41b990513e6
@staticmethod def _is_infeasible(c_bar: Optional[WorkloadSpace]) -> bool: '\n We know that a given w is infeasible if the corresponding c_bar vector (solution to the dual\n of the effective cost) is None\n\n :param c_bar: vector defining a level set.\n :return: True if the c_bar vector is None. False otherwise.\n ' return (c_bar is None)
We know that a given w is infeasible if the corresponding c_bar vector (solution to the dual of the effective cost) is None :param c_bar: vector defining a level set. :return: True if the c_bar vector is None. False otherwise.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_infeasible
dmcnamee/snc
5
python
@staticmethod def _is_infeasible(c_bar: Optional[WorkloadSpace]) -> bool: '\n We know that a given w is infeasible if the corresponding c_bar vector (solution to the dual\n of the effective cost) is None\n\n :param c_bar: vector defining a level set.\n :return: True if the c_bar vector is None. False otherwise.\n ' return (c_bar is None)
@staticmethod def _is_infeasible(c_bar: Optional[WorkloadSpace]) -> bool: '\n We know that a given w is infeasible if the corresponding c_bar vector (solution to the dual\n of the effective cost) is None\n\n :param c_bar: vector defining a level set.\n :return: True if the c_bar vector is None. False otherwise.\n ' return (c_bar is None)<|docstring|>We know that a given w is infeasible if the corresponding c_bar vector (solution to the dual of the effective cost) is None :param c_bar: vector defining a level set. :return: True if the c_bar vector is None. False otherwise.<|endoftext|>
1d7df1a421183009dd3087088e04d9522ed0265c8bc284962be1928352de25e4
@staticmethod def _is_defining_a_monotone_region(c_bar: WorkloadSpace, eps: float=1e-07) -> bool: '\n Checks whether a c_bar vector defines a level set in the region where the effective cost is\n monotone. If c_bar is componentwise nonnegative, then it defines a level set where\n any increment in the workload makes an increment in the effective cost.\n\n :param c_bar: vector normal to the level set.\n :param eps: tolerance for assessing that a component is null.\n :return: True if c_bar defines a level set of the monotone region. False otherwise.\n ' return bool(np.all((c_bar >= (- eps))))
Checks whether a c_bar vector defines a level set in the region where the effective cost is monotone. If c_bar is componentwise nonnegative, then it defines a level set where any increment in the workload makes an increment in the effective cost. :param c_bar: vector normal to the level set. :param eps: tolerance for assessing that a component is null. :return: True if c_bar defines a level set of the monotone region. False otherwise.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_defining_a_monotone_region
dmcnamee/snc
5
python
@staticmethod def _is_defining_a_monotone_region(c_bar: WorkloadSpace, eps: float=1e-07) -> bool: '\n Checks whether a c_bar vector defines a level set in the region where the effective cost is\n monotone. If c_bar is componentwise nonnegative, then it defines a level set where\n any increment in the workload makes an increment in the effective cost.\n\n :param c_bar: vector normal to the level set.\n :param eps: tolerance for assessing that a component is null.\n :return: True if c_bar defines a level set of the monotone region. False otherwise.\n ' return bool(np.all((c_bar >= (- eps))))
@staticmethod def _is_defining_a_monotone_region(c_bar: WorkloadSpace, eps: float=1e-07) -> bool: '\n Checks whether a c_bar vector defines a level set in the region where the effective cost is\n monotone. If c_bar is componentwise nonnegative, then it defines a level set where\n any increment in the workload makes an increment in the effective cost.\n\n :param c_bar: vector normal to the level set.\n :param eps: tolerance for assessing that a component is null.\n :return: True if c_bar defines a level set of the monotone region. False otherwise.\n ' return bool(np.all((c_bar >= (- eps))))<|docstring|>Checks whether a c_bar vector defines a level set in the region where the effective cost is monotone. If c_bar is componentwise nonnegative, then it defines a level set where any increment in the workload makes an increment in the effective cost. :param c_bar: vector normal to the level set. :param eps: tolerance for assessing that a component is null. :return: True if c_bar defines a level set of the monotone region. False otherwise.<|endoftext|>
f0d096dcf0c2ecf4ad9e6b1db24a031872652e1fa60d35b7abe7c9372cc51631
@staticmethod def _is_w_inside_monotone_region(w: WorkloadSpace, w_star: WorkloadSpace, c_bar: WorkloadSpace) -> bool: '\n Given a workload, w, and its projection onto the monotone region, w_star, it checks whether\n w is already inside the monotone region. When w == w_star, then we conclude that there is no\n other workload greater than w that reduces the effective cost. However, depending on the\n actual instance of the LP that we solved in order to obtain w_star, and the solver we used,\n it could happen that even when w != w_star, they still have the same effective cost.\n So a more robust test to check whether we are inside the monotone region, is to check\n whether w and w_star give the same effective cost. Recall that the effective cost can\n be quickly computed for any w by doing the dot product:\n c_bar.T @ w\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param w_star: projection of w onto the monotone region.\n :param c_bar: vector normal to the level set.\n :return: True if w is in the monotone region. False otherwise.\n ' return (np.abs((c_bar.T @ (w_star - w))) < 0.001)
Given a workload, w, and its projection onto the monotone region, w_star, it checks whether w is already inside the monotone region. When w == w_star, then we conclude that there is no other workload greater than w that reduces the effective cost. However, depending on the actual instance of the LP that we solved in order to obtain w_star, and the solver we used, it could happen that even when w != w_star, they still have the same effective cost. So a more robust test to check whether we are inside the monotone region, is to check whether w and w_star give the same effective cost. Recall that the effective cost can be quickly computed for any w by doing the dot product: c_bar.T @ w :param w: current state in workload space, i.e. w = Xi @ x. :param w_star: projection of w onto the monotone region. :param c_bar: vector normal to the level set. :return: True if w is in the monotone region. False otherwise.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_is_w_inside_monotone_region
dmcnamee/snc
5
python
@staticmethod def _is_w_inside_monotone_region(w: WorkloadSpace, w_star: WorkloadSpace, c_bar: WorkloadSpace) -> bool: '\n Given a workload, w, and its projection onto the monotone region, w_star, it checks whether\n w is already inside the monotone region. When w == w_star, then we conclude that there is no\n other workload greater than w that reduces the effective cost. However, depending on the\n actual instance of the LP that we solved in order to obtain w_star, and the solver we used,\n it could happen that even when w != w_star, they still have the same effective cost.\n So a more robust test to check whether we are inside the monotone region, is to check\n whether w and w_star give the same effective cost. Recall that the effective cost can\n be quickly computed for any w by doing the dot product:\n c_bar.T @ w\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param w_star: projection of w onto the monotone region.\n :param c_bar: vector normal to the level set.\n :return: True if w is in the monotone region. False otherwise.\n ' return (np.abs((c_bar.T @ (w_star - w))) < 0.001)
@staticmethod def _is_w_inside_monotone_region(w: WorkloadSpace, w_star: WorkloadSpace, c_bar: WorkloadSpace) -> bool: '\n Given a workload, w, and its projection onto the monotone region, w_star, it checks whether\n w is already inside the monotone region. When w == w_star, then we conclude that there is no\n other workload greater than w that reduces the effective cost. However, depending on the\n actual instance of the LP that we solved in order to obtain w_star, and the solver we used,\n it could happen that even when w != w_star, they still have the same effective cost.\n So a more robust test to check whether we are inside the monotone region, is to check\n whether w and w_star give the same effective cost. Recall that the effective cost can\n be quickly computed for any w by doing the dot product:\n c_bar.T @ w\n\n :param w: current state in workload space, i.e. w = Xi @ x.\n :param w_star: projection of w onto the monotone region.\n :param c_bar: vector normal to the level set.\n :return: True if w is in the monotone region. False otherwise.\n ' return (np.abs((c_bar.T @ (w_star - w))) < 0.001)<|docstring|>Given a workload, w, and its projection onto the monotone region, w_star, it checks whether w is already inside the monotone region. When w == w_star, then we conclude that there is no other workload greater than w that reduces the effective cost. However, depending on the actual instance of the LP that we solved in order to obtain w_star, and the solver we used, it could happen that even when w != w_star, they still have the same effective cost. So a more robust test to check whether we are inside the monotone region, is to check whether w and w_star give the same effective cost. Recall that the effective cost can be quickly computed for any w by doing the dot product: c_bar.T @ w :param w: current state in workload space, i.e. w = Xi @ x. :param w_star: projection of w onto the monotone region. :param c_bar: vector normal to the level set. 
:return: True if w is in the monotone region. False otherwise.<|endoftext|>
cf8f28252a19e8ba809c1cf2807d70ae314d046008bb6205bfb406ed640a5fce
def _get_level_set_for_current_workload(self, w: WorkloadSpace) -> Optional[WorkloadSpace]: '\n The effective cost can be represented as a piecewise linear function,\n with coefficients given by the vertexes of the feasible set of the dual\n program of the LP that computes the effective cost. Indeed, the solution to such\n dual program for a given w, gives the linear coefficient at w.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :return: c_bar: vector defining level set of the effective cost at current w. None is\n returned if the optimisation is unsuccessful.\n ' (c_bar, _, _) = self.c_bar_solver.solve(w) return c_bar
The effective cost can be represented as a piecewise linear function, with coefficients given by the vertexes of the feasible set of the dual program of the LP that computes the effective cost. Indeed, the solution to such dual program for a given w, gives the linear coefficient at w. :param w: current state in workload space, i.e. w = Xi x. :return: c_bar: vector defining level set of the effective cost at current w. None is returned if the optimisation is unsuccessful.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_get_level_set_for_current_workload
dmcnamee/snc
5
python
def _get_level_set_for_current_workload(self, w: WorkloadSpace) -> Optional[WorkloadSpace]: '\n The effective cost can be represented as a piecewise linear function,\n with coefficients given by the vertexes of the feasible set of the dual\n program of the LP that computes the effective cost. Indeed, the solution to such\n dual program for a given w, gives the linear coefficient at w.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :return: c_bar: vector defining level set of the effective cost at current w. None is\n returned if the optimisation is unsuccessful.\n ' (c_bar, _, _) = self.c_bar_solver.solve(w) return c_bar
def _get_level_set_for_current_workload(self, w: WorkloadSpace) -> Optional[WorkloadSpace]: '\n The effective cost can be represented as a piecewise linear function,\n with coefficients given by the vertexes of the feasible set of the dual\n program of the LP that computes the effective cost. Indeed, the solution to such\n dual program for a given w, gives the linear coefficient at w.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :return: c_bar: vector defining level set of the effective cost at current w. None is\n returned if the optimisation is unsuccessful.\n ' (c_bar, _, _) = self.c_bar_solver.solve(w) return c_bar<|docstring|>The effective cost can be represented as a piecewise linear function, with coefficients given by the vertexes of the feasible set of the dual program of the LP that computes the effective cost. Indeed, the solution to such dual program for a given w, gives the linear coefficient at w. :param w: current state in workload space, i.e. w = Xi x. :return: c_bar: vector defining level set of the effective cost at current w. None is returned if the optimisation is unsuccessful.<|endoftext|>
0b325f4697d6709d8dfc690786cedb70731c1b7139aab8c815468a5a646bd562
@staticmethod def _get_vector_defining_possible_idling_direction(w_star: WorkloadSpace, w: WorkloadSpace) -> WorkloadSpace: '\n Returns vector in the projection direction from w to w_star, which defines the resources in\n which we might be able to relax the nonidling constraints\n\n :param w_star: projection of w onto the closest face along the direction of minimum cost.\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: v_star: vector in the projection direction from w to w_star. If we are in\n the monotone region, then w = w_star and v_star is a vector of np.nan.\n ' return (w_star - w)
Returns vector in the projection direction from w to w_star, which defines the resources in which we might be able to relax the nonidling constraints :param w_star: projection of w onto the closest face along the direction of minimum cost. :param w: current state in workload space, i.e. w = Xi @ x. :return: v_star: vector in the projection direction from w to w_star. If we are in the monotone region, then w = w_star and v_star is a vector of np.nan.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_get_vector_defining_possible_idling_direction
dmcnamee/snc
5
python
@staticmethod def _get_vector_defining_possible_idling_direction(w_star: WorkloadSpace, w: WorkloadSpace) -> WorkloadSpace: '\n Returns vector in the projection direction from w to w_star, which defines the resources in\n which we might be able to relax the nonidling constraints\n\n :param w_star: projection of w onto the closest face along the direction of minimum cost.\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: v_star: vector in the projection direction from w to w_star. If we are in\n the monotone region, then w = w_star and v_star is a vector of np.nan.\n ' return (w_star - w)
@staticmethod def _get_vector_defining_possible_idling_direction(w_star: WorkloadSpace, w: WorkloadSpace) -> WorkloadSpace: '\n Returns vector in the projection direction from w to w_star, which defines the resources in\n which we might be able to relax the nonidling constraints\n\n :param w_star: projection of w onto the closest face along the direction of minimum cost.\n :param w: current state in workload space, i.e. w = Xi @ x.\n :return: v_star: vector in the projection direction from w to w_star. If we are in\n the monotone region, then w = w_star and v_star is a vector of np.nan.\n ' return (w_star - w)<|docstring|>Returns vector in the projection direction from w to w_star, which defines the resources in which we might be able to relax the nonidling constraints :param w_star: projection of w onto the closest face along the direction of minimum cost. :param w: current state in workload space, i.e. w = Xi @ x. :return: v_star: vector in the projection direction from w to w_star. If we are in the monotone region, then w = w_star and v_star is a vector of np.nan.<|endoftext|>
ead45e5b8da23e391e9814895e6a37d9dc032cbbd493292935c1ca83a721e9ba
def _non_negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n Performs all steps needed to obtain the hedging threshold for the closest face.\n\n :param eps: tolerance value to check that we are not in the negative orthant.\n ' if (not self._is_1d_workload_relaxation(w)): assert (not self._is_negative_orthant(w)) if (not np.any((w > eps))): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} c_bar = self._get_level_set_for_current_workload(w) if self._is_infeasible(c_bar): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} elif self._is_defining_a_monotone_region(c_bar): current_workload_vars = {'w': w, 'w_star': w, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars w_star = self._find_workload_with_min_eff_cost_by_idling(w) if self._is_w_inside_monotone_region(w, w_star, c_bar): current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars v_star = self._get_vector_defining_possible_idling_direction(w_star, w) k_idling_set = np.where((v_star > eps))[0] current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'v_star': v_star, 'k_idling_set': k_idling_set} return current_workload_vars
Performs all steps needed to obtain the hedging threshold for the closest face. :param eps: tolerance value to check that we are not in the negative orthant.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_non_negative_workloads
dmcnamee/snc
5
python
def _non_negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n Performs all steps needed to obtain the hedging threshold for the closest face.\n\n :param eps: tolerance value to check that we are not in the negative orthant.\n ' if (not self._is_1d_workload_relaxation(w)): assert (not self._is_negative_orthant(w)) if (not np.any((w > eps))): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} c_bar = self._get_level_set_for_current_workload(w) if self._is_infeasible(c_bar): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} elif self._is_defining_a_monotone_region(c_bar): current_workload_vars = {'w': w, 'w_star': w, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars w_star = self._find_workload_with_min_eff_cost_by_idling(w) if self._is_w_inside_monotone_region(w, w_star, c_bar): current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars v_star = self._get_vector_defining_possible_idling_direction(w_star, w) k_idling_set = np.where((v_star > eps))[0] current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'v_star': v_star, 'k_idling_set': k_idling_set} return current_workload_vars
def _non_negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n Performs all steps needed to obtain the hedging threshold for the closest face.\n\n :param eps: tolerance value to check that we are not in the negative orthant.\n ' if (not self._is_1d_workload_relaxation(w)): assert (not self._is_negative_orthant(w)) if (not np.any((w > eps))): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} c_bar = self._get_level_set_for_current_workload(w) if self._is_infeasible(c_bar): return {'w': w, 'w_star': w, 'k_idling_set': np.array([])} elif self._is_defining_a_monotone_region(c_bar): current_workload_vars = {'w': w, 'w_star': w, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars w_star = self._find_workload_with_min_eff_cost_by_idling(w) if self._is_w_inside_monotone_region(w, w_star, c_bar): current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'k_idling_set': np.array([])} return current_workload_vars v_star = self._get_vector_defining_possible_idling_direction(w_star, w) k_idling_set = np.where((v_star > eps))[0] current_workload_vars = {'w': w, 'w_star': w_star, 'c_bar': c_bar, 'v_star': v_star, 'k_idling_set': k_idling_set} return current_workload_vars<|docstring|>Performs all steps needed to obtain the hedging threshold for the closest face. :param eps: tolerance value to check that we are not in the negative orthant.<|endoftext|>
467e205da4509fba762f5920d455ab8e23d4e34a8f9a9c946758b24993ee6b68
def _negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n For strategic idling with no hedging when workload has no positive\n components all resources are allowed to idle.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :param eps: tolerance value to check that we are in the negative orthant.\n ' assert self._is_negative_orthant(w, eps) current_workload_variables = {'w': w, 'w_star': w, 'k_idling_set': np.arange(len(w))} return current_workload_variables
For strategic idling with no hedging when workload has no positive components all resources are allowed to idle. :param w: current state in workload space, i.e. w = Xi x. :param eps: tolerance value to check that we are in the negative orthant.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
_negative_workloads
dmcnamee/snc
5
python
def _negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n For strategic idling with no hedging when workload has no positive\n components all resources are allowed to idle.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :param eps: tolerance value to check that we are in the negative orthant.\n ' assert self._is_negative_orthant(w, eps) current_workload_variables = {'w': w, 'w_star': w, 'k_idling_set': np.arange(len(w))} return current_workload_variables
def _negative_workloads(self, w: WorkloadSpace, eps: float=1e-06) -> Dict[(str, Any)]: '\n For strategic idling with no hedging when workload has no positive\n components all resources are allowed to idle.\n\n :param w: current state in workload space, i.e. w = Xi x.\n :param eps: tolerance value to check that we are in the negative orthant.\n ' assert self._is_negative_orthant(w, eps) current_workload_variables = {'w': w, 'w_star': w, 'k_idling_set': np.arange(len(w))} return current_workload_variables<|docstring|>For strategic idling with no hedging when workload has no positive components all resources are allowed to idle. :param w: current state in workload space, i.e. w = Xi x. :param eps: tolerance value to check that we are in the negative orthant.<|endoftext|>
75e99b055731413a5e00640e0f1434513d773c59175e8d02d51903438b343030
def get_allowed_idling_directions(self, state: StateSpace) -> StrategicIdlingOutput: '\n Method projects current worload onto the full monotone effective cost cone in order\n to identify allowed idling directions.\n\n :param state: current buffer state of the network.\n :return: StrategicIdlingOutput\n - k_idling_set: set of possibly idling directions.\n ' w = (self._workload_mat @ state) if (self._is_negative_orthant(w) and (not self._is_1d_workload_relaxation(w))): idling_decision_dict = self._negative_workloads(w) else: idling_decision_dict = self._non_negative_workloads(w) return self._get_null_strategic_idling_output(**idling_decision_dict)
Method projects current worload onto the full monotone effective cost cone in order to identify allowed idling directions. :param state: current buffer state of the network. :return: StrategicIdlingOutput - k_idling_set: set of possibly idling directions.
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
get_allowed_idling_directions
dmcnamee/snc
5
python
def get_allowed_idling_directions(self, state: StateSpace) -> StrategicIdlingOutput: '\n Method projects current worload onto the full monotone effective cost cone in order\n to identify allowed idling directions.\n\n :param state: current buffer state of the network.\n :return: StrategicIdlingOutput\n - k_idling_set: set of possibly idling directions.\n ' w = (self._workload_mat @ state) if (self._is_negative_orthant(w) and (not self._is_1d_workload_relaxation(w))): idling_decision_dict = self._negative_workloads(w) else: idling_decision_dict = self._non_negative_workloads(w) return self._get_null_strategic_idling_output(**idling_decision_dict)
def get_allowed_idling_directions(self, state: StateSpace) -> StrategicIdlingOutput: '\n Method projects current worload onto the full monotone effective cost cone in order\n to identify allowed idling directions.\n\n :param state: current buffer state of the network.\n :return: StrategicIdlingOutput\n - k_idling_set: set of possibly idling directions.\n ' w = (self._workload_mat @ state) if (self._is_negative_orthant(w) and (not self._is_1d_workload_relaxation(w))): idling_decision_dict = self._negative_workloads(w) else: idling_decision_dict = self._non_negative_workloads(w) return self._get_null_strategic_idling_output(**idling_decision_dict)<|docstring|>Method projects current worload onto the full monotone effective cost cone in order to identify allowed idling directions. :param state: current buffer state of the network. :return: StrategicIdlingOutput - k_idling_set: set of possibly idling directions.<|endoftext|>
5165d9aeba262573b78be434e6cc07c2c4e55dc6d9884dc472852bfa36d8b9c0
def to_serializable(self) -> Dict: 'Return a serializable object, that can be used by a JSON Encoder' return clean_to_serializable(self)
Return a serializable object, that can be used by a JSON Encoder
src/snc/agents/hedgehog/strategic_idling/strategic_idling.py
to_serializable
dmcnamee/snc
5
python
def to_serializable(self) -> Dict: return clean_to_serializable(self)
def to_serializable(self) -> Dict: return clean_to_serializable(self)<|docstring|>Return a serializable object, that can be used by a JSON Encoder<|endoftext|>
ee288f85acae83a12bf399cf344c8f27f438327d3bae4a17504bc999fd5eea98
def parse_input(fname: str) -> List[Dict[(str, str)]]: 'Read the input file and return the parsed data.' with open(fname, 'rt') as inf: raw: str = inf.read() blocks: List[str] = raw.split((2 * os.linesep)) data: List[Dict[(str, str)]] = [] for block in blocks: passport: Dict[(str, str)] = {} for segment in block.replace('\n', ' ').split(): (key, value) = segment.split(':') passport[key] = value data.append(passport) return data
Read the input file and return the parsed data.
2020/04-passport_processing/passport_processing.py
parse_input
BrendanLeber/adventofcode
0
python
def parse_input(fname: str) -> List[Dict[(str, str)]]: with open(fname, 'rt') as inf: raw: str = inf.read() blocks: List[str] = raw.split((2 * os.linesep)) data: List[Dict[(str, str)]] = [] for block in blocks: passport: Dict[(str, str)] = {} for segment in block.replace('\n', ' ').split(): (key, value) = segment.split(':') passport[key] = value data.append(passport) return data
def parse_input(fname: str) -> List[Dict[(str, str)]]: with open(fname, 'rt') as inf: raw: str = inf.read() blocks: List[str] = raw.split((2 * os.linesep)) data: List[Dict[(str, str)]] = [] for block in blocks: passport: Dict[(str, str)] = {} for segment in block.replace('\n', ' ').split(): (key, value) = segment.split(':') passport[key] = value data.append(passport) return data<|docstring|>Read the input file and return the parsed data.<|endoftext|>
e9c67a8738a748c929c6917e68bd5d87978bea8ccfa67c12b2830cd581b11e77
def has_all_required_keys(passport: Dict[(str, str)]) -> bool: 'Return true if the given passport contains all of the required keys.' for required in REQUIRED_KEYS: if (required not in passport.keys()): return False return True
Return true if the given passport contains all of the required keys.
2020/04-passport_processing/passport_processing.py
has_all_required_keys
BrendanLeber/adventofcode
0
python
def has_all_required_keys(passport: Dict[(str, str)]) -> bool: for required in REQUIRED_KEYS: if (required not in passport.keys()): return False return True
def has_all_required_keys(passport: Dict[(str, str)]) -> bool: for required in REQUIRED_KEYS: if (required not in passport.keys()): return False return True<|docstring|>Return true if the given passport contains all of the required keys.<|endoftext|>
cc057823811c12dfa7bd9a12b248cfa25cc8225c966baf36c4c94182d8a3fcce
def _iterable(obj): ' Returns True if `obj` is iterable. ' try: iter(obj) except TypeError: return False return True
Returns True if `obj` is iterable.
roytherobot/__init__.py
_iterable
tuslisoftware/roytherobot
2
python
def _iterable(obj): ' ' try: iter(obj) except TypeError: return False return True
def _iterable(obj): ' ' try: iter(obj) except TypeError: return False return True<|docstring|>Returns True if `obj` is iterable.<|endoftext|>
28bba509b1021e1f6e943c80f2f7c87070fc2fe7797073b855cd7dcfcbc2b1f8
def __init__(self, port, write_sleep=0.05): "\n Initialization. Parallax Propeller needs a baud rate of 2400, no\n parity, and one or two stop bits.\n\n Parameters\n ----------\n port : str\n The name of the serial port. Usually '/dev/ttyUSB0' for Linux,\n 'COM3' for Windows, etc.\n write_sleep : float, optional\n How long to wait in seconds after writing. For 2400 baud rate,\n less than 0.03 tends to cause communication errors.\n\n " Serial.__init__(self, port=port, baudrate=2400, parity='N', stopbits=1, timeout=5.0) self.write_sleep = write_sleep time.sleep(0.1)
Initialization. Parallax Propeller needs a baud rate of 2400, no parity, and one or two stop bits. Parameters ---------- port : str The name of the serial port. Usually '/dev/ttyUSB0' for Linux, 'COM3' for Windows, etc. write_sleep : float, optional How long to wait in seconds after writing. For 2400 baud rate, less than 0.03 tends to cause communication errors.
roytherobot/__init__.py
__init__
tuslisoftware/roytherobot
2
python
def __init__(self, port, write_sleep=0.05): "\n Initialization. Parallax Propeller needs a baud rate of 2400, no\n parity, and one or two stop bits.\n\n Parameters\n ----------\n port : str\n The name of the serial port. Usually '/dev/ttyUSB0' for Linux,\n 'COM3' for Windows, etc.\n write_sleep : float, optional\n How long to wait in seconds after writing. For 2400 baud rate,\n less than 0.03 tends to cause communication errors.\n\n " Serial.__init__(self, port=port, baudrate=2400, parity='N', stopbits=1, timeout=5.0) self.write_sleep = write_sleep time.sleep(0.1)
def __init__(self, port, write_sleep=0.05): "\n Initialization. Parallax Propeller needs a baud rate of 2400, no\n parity, and one or two stop bits.\n\n Parameters\n ----------\n port : str\n The name of the serial port. Usually '/dev/ttyUSB0' for Linux,\n 'COM3' for Windows, etc.\n write_sleep : float, optional\n How long to wait in seconds after writing. For 2400 baud rate,\n less than 0.03 tends to cause communication errors.\n\n " Serial.__init__(self, port=port, baudrate=2400, parity='N', stopbits=1, timeout=5.0) self.write_sleep = write_sleep time.sleep(0.1)<|docstring|>Initialization. Parallax Propeller needs a baud rate of 2400, no parity, and one or two stop bits. Parameters ---------- port : str The name of the serial port. Usually '/dev/ttyUSB0' for Linux, 'COM3' for Windows, etc. write_sleep : float, optional How long to wait in seconds after writing. For 2400 baud rate, less than 0.03 tends to cause communication errors.<|endoftext|>
00f2854e74a8123fd1d3013b57931ce0c24f817a89fd7a9d8fe9191c3ef084c3
def _write(self, value): '\n Overrides serial.Serial.write() to sleep after writing.\n\n Parameters\n ----------\n value : bytearray\n Value to write.\n\n ' self.write(value) time.sleep(self.write_sleep)
Overrides serial.Serial.write() to sleep after writing. Parameters ---------- value : bytearray Value to write.
roytherobot/__init__.py
_write
tuslisoftware/roytherobot
2
python
def _write(self, value): '\n Overrides serial.Serial.write() to sleep after writing.\n\n Parameters\n ----------\n value : bytearray\n Value to write.\n\n ' self.write(value) time.sleep(self.write_sleep)
def _write(self, value): '\n Overrides serial.Serial.write() to sleep after writing.\n\n Parameters\n ----------\n value : bytearray\n Value to write.\n\n ' self.write(value) time.sleep(self.write_sleep)<|docstring|>Overrides serial.Serial.write() to sleep after writing. Parameters ---------- value : bytearray Value to write.<|endoftext|>
4abc4e976fdfa19151d5db7959149ed9b401208d43d5e78ff5690d8eb853d96e
def set_position(self, channel, ramp_speed, pulse_width): "\n Moves the servo.\n Serial command: '!SC' + <channel> + <ramp_speed> + <pulse_width> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n ramp_speed : int\n Speed the servo moves to its new position, range 0-63.\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel, ramp_speed or pulse_width values.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (not isinstance(ramp_speed, int)): raise PropellerError('Ramp speed must be an integer.') if (not isinstance(pulse_width, int)): raise PropellerError('Pulse width must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') max_pw = self.MAX_PULSE_WIDTH max_rs = self.MAX_RAMP_SPEED if ((ramp_speed < 0) or (ramp_speed > max_rs)): msg = ('Ramp speed must be between 0 and %d.' % max_rs) raise PropellerError(msg) if ((pulse_width < 0) or (pulse_width > max_pw)): msg = ('Pulse width must be between 0 and %d.' % max_pw) raise PropellerError(msg) ch = struct.pack(b'<b', channel) rs = struct.pack(b'<b', ramp_speed) pw = struct.pack(b'<h', pulse_width) command = ((((b'!SC' + ch) + rs) + pw) + self.CR) self._write(command)
Moves the servo. Serial command: '!SC' + <channel> + <ramp_speed> + <pulse_width> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. ramp_speed : int Speed the servo moves to its new position, range 0-63. pulse_width : int Pulse width in microseconds determines the position of the servo. The value ranges depend on the individual servo. Raises ------ PropellerError for invalid channel, ramp_speed or pulse_width values.
roytherobot/__init__.py
set_position
tuslisoftware/roytherobot
2
python
def set_position(self, channel, ramp_speed, pulse_width): "\n Moves the servo.\n Serial command: '!SC' + <channel> + <ramp_speed> + <pulse_width> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n ramp_speed : int\n Speed the servo moves to its new position, range 0-63.\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel, ramp_speed or pulse_width values.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (not isinstance(ramp_speed, int)): raise PropellerError('Ramp speed must be an integer.') if (not isinstance(pulse_width, int)): raise PropellerError('Pulse width must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') max_pw = self.MAX_PULSE_WIDTH max_rs = self.MAX_RAMP_SPEED if ((ramp_speed < 0) or (ramp_speed > max_rs)): msg = ('Ramp speed must be between 0 and %d.' % max_rs) raise PropellerError(msg) if ((pulse_width < 0) or (pulse_width > max_pw)): msg = ('Pulse width must be between 0 and %d.' % max_pw) raise PropellerError(msg) ch = struct.pack(b'<b', channel) rs = struct.pack(b'<b', ramp_speed) pw = struct.pack(b'<h', pulse_width) command = ((((b'!SC' + ch) + rs) + pw) + self.CR) self._write(command)
def set_position(self, channel, ramp_speed, pulse_width): "\n Moves the servo.\n Serial command: '!SC' + <channel> + <ramp_speed> + <pulse_width> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n ramp_speed : int\n Speed the servo moves to its new position, range 0-63.\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel, ramp_speed or pulse_width values.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (not isinstance(ramp_speed, int)): raise PropellerError('Ramp speed must be an integer.') if (not isinstance(pulse_width, int)): raise PropellerError('Pulse width must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') max_pw = self.MAX_PULSE_WIDTH max_rs = self.MAX_RAMP_SPEED if ((ramp_speed < 0) or (ramp_speed > max_rs)): msg = ('Ramp speed must be between 0 and %d.' % max_rs) raise PropellerError(msg) if ((pulse_width < 0) or (pulse_width > max_pw)): msg = ('Pulse width must be between 0 and %d.' % max_pw) raise PropellerError(msg) ch = struct.pack(b'<b', channel) rs = struct.pack(b'<b', ramp_speed) pw = struct.pack(b'<h', pulse_width) command = ((((b'!SC' + ch) + rs) + pw) + self.CR) self._write(command)<|docstring|>Moves the servo. Serial command: '!SC' + <channel> + <ramp_speed> + <pulse_width> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. ramp_speed : int Speed the servo moves to its new position, range 0-63. pulse_width : int Pulse width in microseconds determines the position of the servo. The value ranges depend on the individual servo. Raises ------ PropellerError for invalid channel, ramp_speed or pulse_width values.<|endoftext|>
6ba1aa777e01eb8115b449127a44b52d14f7986ca77abf6a43fd01cfe69544a0
def set_baud_rate(self, mode=0): '\n Sets the baud rate to either 2400 bps (mode 0) or 38.4 kbps (mode 1).\n Serial command: \'!SCSBR\' + <mode> + \'\r\'\n\n *WARNING* Mode 1 does NOT currently return \'BR\' + <mode> for some\n reason, and is not confirmed to actually work.\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for 2400 bps and 1 for 38.4 kbps.\n\n Returns\n -------\n out : str or None\n String equal to: "BR" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' baudrates = [2400, 38400] if (mode not in range(2)): raise PropellerError('Mode must be 0 or 1.') if (mode == 1): warning = "WARNING: Mode 1 is currently experimental, and has not been confirmed to work yet. No reply of 'BR' + <mode> is received for mode 1." print(warning) command = ((b'!SCSBR' + struct.pack(b'<b', mode)) + self.CR) self._write(command) self.baudrate = baudrates[mode] out = None if (mode == 0): retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out
Sets the baud rate to either 2400 bps (mode 0) or 38.4 kbps (mode 1). Serial command: '!SCSBR' + <mode> + ' ' *WARNING* Mode 1 does NOT currently return 'BR' + <mode> for some reason, and is not confirmed to actually work. Parameters ---------- mode : int, optional Choose 0 (default) for 2400 bps and 1 for 38.4 kbps. Returns ------- out : str or None String equal to: "BR" + <mode>. Raises ------ PropellerError for invalid mode value.
roytherobot/__init__.py
set_baud_rate
tuslisoftware/roytherobot
2
python
def set_baud_rate(self, mode=0): '\n Sets the baud rate to either 2400 bps (mode 0) or 38.4 kbps (mode 1).\n Serial command: \'!SCSBR\' + <mode> + \'\r\'\n\n *WARNING* Mode 1 does NOT currently return \'BR\' + <mode> for some\n reason, and is not confirmed to actually work.\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for 2400 bps and 1 for 38.4 kbps.\n\n Returns\n -------\n out : str or None\n String equal to: "BR" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' baudrates = [2400, 38400] if (mode not in range(2)): raise PropellerError('Mode must be 0 or 1.') if (mode == 1): warning = "WARNING: Mode 1 is currently experimental, and has not been confirmed to work yet. No reply of 'BR' + <mode> is received for mode 1." print(warning) command = ((b'!SCSBR' + struct.pack(b'<b', mode)) + self.CR) self._write(command) self.baudrate = baudrates[mode] out = None if (mode == 0): retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out
def set_baud_rate(self, mode=0): '\n Sets the baud rate to either 2400 bps (mode 0) or 38.4 kbps (mode 1).\n Serial command: \'!SCSBR\' + <mode> + \'\r\'\n\n *WARNING* Mode 1 does NOT currently return \'BR\' + <mode> for some\n reason, and is not confirmed to actually work.\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for 2400 bps and 1 for 38.4 kbps.\n\n Returns\n -------\n out : str or None\n String equal to: "BR" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' baudrates = [2400, 38400] if (mode not in range(2)): raise PropellerError('Mode must be 0 or 1.') if (mode == 1): warning = "WARNING: Mode 1 is currently experimental, and has not been confirmed to work yet. No reply of 'BR' + <mode> is received for mode 1." print(warning) command = ((b'!SCSBR' + struct.pack(b'<b', mode)) + self.CR) self._write(command) self.baudrate = baudrates[mode] out = None if (mode == 0): retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out<|docstring|>Sets the baud rate to either 2400 bps (mode 0) or 38.4 kbps (mode 1). Serial command: '!SCSBR' + <mode> + ' ' *WARNING* Mode 1 does NOT currently return 'BR' + <mode> for some reason, and is not confirmed to actually work. Parameters ---------- mode : int, optional Choose 0 (default) for 2400 bps and 1 for 38.4 kbps. Returns ------- out : str or None String equal to: "BR" + <mode>. Raises ------ PropellerError for invalid mode value.<|endoftext|>
4b5d74ed01b7abade15513ca29781f6d3eefa4ab345ce3dbd276cfa1cb2084ce
def set_software_port(self, mode=0): '\n Assigns the PSCU to act on commands sent to channels 0-15 (mode 0) or\n channels 16-31 (mode 1).\n Serial command: \'!SCPSS\' + <mode> + \'\r\'\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for channels 0-15 and 1 for channels 16-31.\n\n Returns\n -------\n out : str\n String equal to: "PM" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' if (mode not in range(0, 2)): raise PropellerError('Mode must be 0 or 1.') command = ((b'!SCPSS' + struct.pack(b'<b', mode)) + self.CR) self._write(command) retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out
Assigns the PSCU to act on commands sent to channels 0-15 (mode 0) or channels 16-31 (mode 1). Serial command: '!SCPSS' + <mode> + ' ' Parameters ---------- mode : int, optional Choose 0 (default) for channels 0-15 and 1 for channels 16-31. Returns ------- out : str String equal to: "PM" + <mode>. Raises ------ PropellerError for invalid mode value.
roytherobot/__init__.py
set_software_port
tuslisoftware/roytherobot
2
python
def set_software_port(self, mode=0): '\n Assigns the PSCU to act on commands sent to channels 0-15 (mode 0) or\n channels 16-31 (mode 1).\n Serial command: \'!SCPSS\' + <mode> + \'\r\'\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for channels 0-15 and 1 for channels 16-31.\n\n Returns\n -------\n out : str\n String equal to: "PM" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' if (mode not in range(0, 2)): raise PropellerError('Mode must be 0 or 1.') command = ((b'!SCPSS' + struct.pack(b'<b', mode)) + self.CR) self._write(command) retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out
def set_software_port(self, mode=0): '\n Assigns the PSCU to act on commands sent to channels 0-15 (mode 0) or\n channels 16-31 (mode 1).\n Serial command: \'!SCPSS\' + <mode> + \'\r\'\n\n Parameters\n ----------\n mode : int, optional\n Choose 0 (default) for channels 0-15 and 1 for channels 16-31.\n\n Returns\n -------\n out : str\n String equal to: "PM" + <mode>.\n\n Raises\n ------\n PropellerError for invalid mode value.\n\n ' if (mode not in range(0, 2)): raise PropellerError('Mode must be 0 or 1.') command = ((b'!SCPSS' + struct.pack(b'<b', mode)) + self.CR) self._write(command) retval = self.read(3) retcmd = retval[:2].decode('ascii') if isinstance(retval[2], int): retmode = retval[2] else: retmode = struct.unpack(b'<b', retval[2])[0] out = (retcmd + str(retmode)) return out<|docstring|>Assigns the PSCU to act on commands sent to channels 0-15 (mode 0) or channels 16-31 (mode 1). Serial command: '!SCPSS' + <mode> + ' ' Parameters ---------- mode : int, optional Choose 0 (default) for channels 0-15 and 1 for channels 16-31. Returns ------- out : str String equal to: "PM" + <mode>. Raises ------ PropellerError for invalid mode value.<|endoftext|>
d364e0a8cdd873fbed2720847e6fae5e59479621f27d70f2d5b3713cec604ea3
def get_position(self, channel): "\n Returns the servo position for the channel.\n Serial command: '!SCRSP' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Returns\n -------\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCRSP' + struct.pack(b'<b', channel)) + self.CR) self._write(command) retval = self.read(3) pulse_width = struct.unpack(b'>h', retval[1:])[0] return pulse_width
Returns the servo position for the channel. Serial command: '!SCRSP' + <channel> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. Returns ------- pulse_width : int Pulse width in microseconds determines the position of the servo. The value ranges depend on the individual servo. Raises ------ PropellerError for invalid channel value.
roytherobot/__init__.py
get_position
tuslisoftware/roytherobot
2
python
def get_position(self, channel): "\n Returns the servo position for the channel.\n Serial command: '!SCRSP' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Returns\n -------\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCRSP' + struct.pack(b'<b', channel)) + self.CR) self._write(command) retval = self.read(3) pulse_width = struct.unpack(b'>h', retval[1:])[0] return pulse_width
def get_position(self, channel): "\n Returns the servo position for the channel.\n Serial command: '!SCRSP' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Returns\n -------\n pulse_width : int\n Pulse width in microseconds determines the position of the servo.\n The value ranges depend on the individual servo.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCRSP' + struct.pack(b'<b', channel)) + self.CR) self._write(command) retval = self.read(3) pulse_width = struct.unpack(b'>h', retval[1:])[0] return pulse_width<|docstring|>Returns the servo position for the channel. Serial command: '!SCRSP' + <channel> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. Returns ------- pulse_width : int Pulse width in microseconds determines the position of the servo. The value ranges depend on the individual servo. Raises ------ PropellerError for invalid channel value.<|endoftext|>
7eddc25b540e3b72aea349a3f90c14406ce8e27fbbc095ec2e049e55b3037ffd
def enable_servo(self, channel): "\n Enables the servo.\n Serial command: '!SCPSE' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCPSE' + struct.pack(b'<b', channel)) + self.CR) self._write(command)
Enables the servo. Serial command: '!SCPSE' + <channel> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. Raises ------ PropellerError for invalid channel value.
roytherobot/__init__.py
enable_servo
tuslisoftware/roytherobot
2
python
def enable_servo(self, channel): "\n Enables the servo.\n Serial command: '!SCPSE' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCPSE' + struct.pack(b'<b', channel)) + self.CR) self._write(command)
def enable_servo(self, channel): "\n Enables the servo.\n Serial command: '!SCPSE' + <channel> + '\r'\n\n Parameters\n ----------\n channel : int\n Servo channel, range 0-15.\n\n Raises\n ------\n PropellerError for invalid channel value.\n\n " if (not isinstance(channel, int)): raise PropellerError('Channel must be an integer.') if (channel not in range(16)): raise PropellerError('Channel must be between 0 and 15.') command = ((b'!SCPSE' + struct.pack(b'<b', channel)) + self.CR) self._write(command)<|docstring|>Enables the servo. Serial command: '!SCPSE' + <channel> + ' ' Parameters ---------- channel : int Servo channel, range 0-15. Raises ------ PropellerError for invalid channel value.<|endoftext|>