index
int64
repo_name
string
branch_name
string
path
string
content
string
import_graph
string
24,754
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/cinder/test_volume_types.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally import exceptions as rally_exceptions from rally_openstack.task.scenarios.cinder import volume_types from tests.unit import test class CinderVolumeTypesTestCase(test.ScenarioTestCase): def setUp(self): super(CinderVolumeTypesTestCase, self).setUp() patch = mock.patch( "rally_openstack.common.services.storage.block.BlockStorage") self.addCleanup(patch.stop) self.mock_cinder = patch.start() def _get_context(self): context = test.get_test_context() context.update({ "admin": { "id": "fake_user_id", "credential": mock.MagicMock() }, "user": {"id": "fake_user_id", "credential": mock.MagicMock()}, "tenant": {"id": "fake", "name": "fake"}}) return context def test_create_and_get_volume_type(self): mock_service = self.mock_cinder.return_value scenario = volume_types.CreateAndGetVolumeType(self._get_context()) description = "rally tests creating types" is_public = False scenario.run(description=description, is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.get_volume_type.assert_called_once_with( mock_service.create_volume_type.return_value) def test_create_and_delete_volume_type(self): mock_service = self.mock_cinder.return_value scenario = volume_types.CreateAndDeleteVolumeType(self._get_context()) description = "rally tests creating types" is_public = False scenario.run(description=description, 
is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.delete_volume_type.assert_called_once_with( mock_service.create_volume_type.return_value) def test_create_and_delete_encryption_type(self): mock_service = self.mock_cinder.return_value context = self._get_context() context.update({ "volume_types": [{"id": "fake_id", "name": "fake_name"}], "iteration": 1}) scenario = volume_types.CreateAndDeleteEncryptionType( context) # case: create_specs is None specs = { "provider": "prov", "cipher": "cip", "key_size": "ks", "control_location": "cl" } scenario.run(create_specs=None, provider="prov", cipher="cip", key_size="ks", control_location="cl") mock_service.create_encryption_type.assert_called_once_with( "fake_id", specs=specs) mock_service.delete_encryption_type.assert_called_once_with( "fake_id") # case: create_specs is not None scenario.run(create_specs="fakecreatespecs", provider="prov", cipher="cip", key_size="ks", control_location="cl") mock_service.create_encryption_type.assert_called_with( "fake_id", specs="fakecreatespecs") mock_service.delete_encryption_type.assert_called_with( "fake_id") def test_create_get_and_delete_encryption_type(self): mock_service = self.mock_cinder.return_value context = self._get_context() context.update({ "volume_types": [{"id": "fake_id", "name": "fake_name"}], "iteration": 1}) scenario = volume_types.CreateGetAndDeleteEncryptionType( context) specs = { "provider": "prov", "cipher": "cip", "key_size": "ks", "control_location": "cl" } scenario.run(provider="prov", cipher="cip", key_size="ks", control_location="cl") mock_service.create_encryption_type.assert_called_once_with( "fake_id", specs=specs) mock_service.get_encryption_type.assert_called_once_with( "fake_id") mock_service.delete_encryption_type.assert_called_once_with( "fake_id") def test_create_and_list_volume_types(self): mock_service = self.mock_cinder.return_value fake_type = mock.Mock() 
pool_list = [mock.Mock(), mock.Mock(), fake_type] description = "rally tests creating types" is_public = False scenario = volume_types.CreateAndListVolumeTypes(self._get_context()) mock_service.create_volume_type.return_value = fake_type mock_service.list_types.return_value = pool_list scenario.run(description=description, is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.list_types.assert_called_once_with() def test_create_and_list_volume_types_with_fails(self): # Negative case: type isn't listed mock_service = self.mock_cinder.return_value fake_type = mock.Mock() pool_list = [mock.Mock(), mock.Mock(), mock.Mock()] description = "rally tests creating types" is_public = False scenario = volume_types.CreateAndListVolumeTypes(self._get_context()) mock_service.create_volume_type.return_value = fake_type mock_service.list_types.return_value = pool_list self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, description=description, is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.list_types.assert_called_once_with() def test_create_and_update_volume_type(self): mock_service = self.mock_cinder.return_value scenario = volume_types.CreateAndUpdateVolumeType(self._get_context()) fake_type = mock.MagicMock() fake_type.name = "any" create_description = "test create" update_description = "test update" mock_service.create_volume_type.return_value = fake_type scenario.run(description=create_description, update_description=update_description) mock_service.create_volume_type.assert_called_once_with( description=create_description, is_public=True) mock_service.update_volume_type.assert_called_once_with( fake_type, description=update_description, # update_is_public and update_name are not specified, so should # not be used is_public=None, name=None ) def 
test_create_volume_type_and_encryption_type(self): mock_service = self.mock_cinder.return_value scenario = volume_types.CreateVolumeTypeAndEncryptionType( self._get_context()) description = "rally tests creating types" is_public = False # case: create_specs is None specs = { "provider": "prov", "cipher": "cip", "key_size": "ks", "control_location": "cl" } scenario.run(create_specs=None, provider="prov", cipher="cip", key_size="ks", control_location="cl", description=description, is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.create_encryption_type.assert_called_once_with( mock_service.create_volume_type.return_value, specs=specs) # case: create_specs is not None scenario.run(create_specs="fakecreatespecs", provider="prov", cipher="cip", key_size="ks", control_location="cl", description=description, is_public=is_public) mock_service.create_volume_type.assert_called_with( description=description, is_public=is_public) mock_service.create_encryption_type.assert_called_with( mock_service.create_volume_type.return_value, specs="fakecreatespecs") def test_create_and_list_encryption_type(self): mock_service = self.mock_cinder.return_value context = self._get_context() context.update({ "volume_types": [{"id": "fake_id", "name": "fake_name"}], "iteration": 1}) scenario = volume_types.CreateAndListEncryptionType( context) # case: create_specs is None specs = { "provider": "prov", "cipher": "cip", "key_size": "ks", "control_location": "cl" } scenario.run(create_specs=None, provider="prov", cipher="cip", key_size="ks", control_location="cl", search_opts="fakeopts") mock_service.create_encryption_type.assert_called_once_with( "fake_id", specs=specs) mock_service.list_encryption_type.assert_called_once_with( "fakeopts") # case: create_specs is not None scenario.run(create_specs="fakecreatespecs", provider="prov", cipher="cip", key_size="ks", control_location="cl", search_opts="fakeopts") 
mock_service.create_encryption_type.assert_called_with( "fake_id", specs="fakecreatespecs") mock_service.list_encryption_type.assert_called_with( "fakeopts") def test_create_and_set_volume_type_keys(self): mock_service = self.mock_cinder.return_value volume_type_key = {"volume_backend_name": "LVM_iSCSI"} description = "rally tests creating types" is_public = False scenario = volume_types.CreateAndSetVolumeTypeKeys( self._get_context()) scenario.run(volume_type_key, description=description, is_public=is_public) mock_service.create_volume_type.assert_called_once_with( description=description, is_public=is_public) mock_service.set_volume_type_keys.assert_called_once_with( mock_service.create_volume_type.return_value, metadata=volume_type_key) def test_create_and_update_encryption_type(self): mock_service = self.mock_cinder.return_value context = self._get_context() context.update({ "volume_types": [{"id": "fake_id", "name": "fake_name"}], "iteration": 1}) scenario = volume_types.CreateAndUpdateEncryptionType( context) create_specs = { "provider": "create_prov", "cipher": "create_cip", "key_size": "create_ks", "control_location": "create_cl" } update_specs = { "provider": "update_prov", "cipher": "update_cip", "key_size": "update_ks", "control_location": "update_cl" } scenario.run(create_provider="create_prov", create_cipher="create_cip", create_key_size="create_ks", create_control_location="create_cl", update_provider="update_prov", update_cipher="update_cip", update_key_size="update_ks", update_control_location="update_cl") mock_service.create_encryption_type.assert_called_once_with( "fake_id", specs=create_specs) mock_service.update_encryption_type.assert_called_once_with( "fake_id", specs=update_specs) def test_create_volume_type_add_and_list_type_access(self): mock_service = self.mock_cinder.return_value scenario = volume_types.CreateVolumeTypeAddAndListTypeAccess( self._get_context()) fake_type = mock.Mock() mock_service.create_volume_type.return_value = 
fake_type scenario.run(description=None, is_public=False) mock_service.create_volume_type.assert_called_once_with( description=None, is_public=False) mock_service.add_type_access.assert_called_once_with( fake_type, project="fake") mock_service.list_type_access.assert_called_once_with(fake_type)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,755
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/cinder/test_qos_specs.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally import exceptions as rally_exceptions from rally_openstack.task.scenarios.cinder import qos_specs from tests.unit import test class CinderQosTestCase(test.ScenarioTestCase): def setUp(self): super(CinderQosTestCase, self).setUp() patch = mock.patch( "rally_openstack.common.services.storage.block.BlockStorage") self.addCleanup(patch.stop) self.mock_cinder = patch.start() def _get_context(self): context = test.get_test_context() context.update({ "admin": { "id": "fake_user_id", "credential": mock.MagicMock() }, "user": {"id": "fake_user_id", "credential": mock.MagicMock()}, "tenant": {"id": "fake", "name": "fake"}}) return context def test_create_and_list_qos(self): mock_service = self.mock_cinder.return_value qos = mock.MagicMock() list_qos = [mock.MagicMock(), mock.MagicMock(), qos] specs = {"consumer": "both", "write_iops_sec": "10", "read_iops_sec": "1000"} scenario = qos_specs.CreateAndListQos(self._get_context()) mock_service.create_qos.return_value = qos mock_service.list_qos.return_value = list_qos scenario.run("both", "10", "1000") mock_service.create_qos.assert_called_once_with(specs) mock_service.list_qos.assert_called_once_with() def test_create_and_list_qos_with_fails(self): mock_service = self.mock_cinder.return_value qos = mock.MagicMock() list_qos = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] specs = {"consumer": "both", "write_iops_sec": "10", 
"read_iops_sec": "1000"} scenario = qos_specs.CreateAndListQos(self._get_context()) mock_service.create_qos.return_value = qos mock_service.list_qos.return_value = list_qos self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, "both", "10", "1000") mock_service.create_qos.assert_called_once_with(specs) mock_service.list_qos.assert_called_once_with() def test_create_and_get_qos(self): mock_service = self.mock_cinder.return_value qos = mock.MagicMock() specs = {"consumer": "both", "write_iops_sec": "10", "read_iops_sec": "1000"} scenario = qos_specs.CreateAndGetQos(self._get_context()) mock_service.create_qos.return_value = qos scenario.run("both", "10", "1000") mock_service.create_qos.assert_called_once_with(specs) mock_service.get_qos.assert_called_once_with(qos.id) def test_create_and_set_qos(self): mock_service = self.mock_cinder.return_value qos = mock.MagicMock() create_specs_args = {"consumer": "back-end", "write_iops_sec": "10", "read_iops_sec": "1000"} set_specs_args = {"consumer": "both", "write_iops_sec": "11", "read_iops_sec": "1001"} scenario = qos_specs.CreateAndSetQos(self._get_context()) mock_service.create_qos.return_value = qos scenario.run("back-end", "10", "1000", "both", "11", "1001") mock_service.create_qos.assert_called_once_with(create_specs_args) mock_service.set_qos.assert_called_once_with( qos=qos, set_specs_args=set_specs_args) def test_create_qos_associate_and_disassociate_type(self): mock_service = self.mock_cinder.return_value context = self._get_context() context.update({ "volume_types": [{"id": "fake_id", "name": "fake_name"}], "iteration": 1}) qos = mock.MagicMock() specs = {"consumer": "both", "write_iops_sec": "10", "read_iops_sec": "1000"} scenario = qos_specs.CreateQosAssociateAndDisassociateType(context) mock_service.create_qos.return_value = qos scenario.run("both", "10", "1000") mock_service.create_qos.assert_called_once_with(specs) mock_service.qos_associate_type.assert_called_once_with( qos_specs=qos, 
volume_type="fake_id") mock_service.qos_disassociate_type.assert_called_once_with( qos_specs=qos, volume_type="fake_id")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,756
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/neutron/test_security_groups.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import copy import ddt from rally import exceptions as rally_exceptions from rally_openstack.task.scenarios.neutron import security_groups from tests.unit import test @ddt.ddt class NeutronSecurityGroup(test.TestCase): @ddt.data( {}, {"security_group_create_args": {}}, {"security_group_create_args": {"description": "fake-description"}}, ) @ddt.unpack def test_create_and_list_security_groups( self, security_group_create_args=None): scenario = security_groups.CreateAndListSecurityGroups() security_group_data = security_group_create_args or {} scenario._create_security_group = mock.Mock() scenario._list_security_groups = mock.Mock() scenario.run(security_group_create_args=security_group_create_args) scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._list_security_groups.assert_called_once_with() @ddt.data( {}, {"security_group_create_args": {}}, {"security_group_create_args": {"description": "fake-description"}}, ) @ddt.unpack def test_create_and_show_security_group( self, security_group_create_args=None): scenario = security_groups.CreateAndShowSecurityGroup() security_group = mock.Mock() security_group_data = security_group_create_args or {} scenario._create_security_group = mock.Mock() scenario._show_security_group = mock.Mock() # Positive case scenario._create_security_group.return_value = security_group scenario.run(security_group_create_args=security_group_create_args) 
scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._show_security_group.assert_called_once_with( scenario._create_security_group.return_value) @ddt.data( {}, {"security_group_create_args": {}}, {"security_group_create_args": {"description": "fake-description"}}, ) @ddt.unpack def test_create_and_show_security_group_with_none_group( self, security_group_create_args=None): scenario = security_groups.CreateAndShowSecurityGroup() security_group_data = security_group_create_args or {} scenario._create_security_group = mock.Mock() scenario._show_security_group = mock.Mock() # Negative case: security_group isn't created scenario._create_security_group.return_value = None self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_create_args) scenario._create_security_group.assert_called_with( **security_group_data) @ddt.data( {}, {"security_group_create_args": {}}, {"security_group_create_args": {"description": "fake-description"}}, ) @ddt.unpack def test_create_and_delete_security_groups( self, security_group_create_args=None): scenario = security_groups.CreateAndDeleteSecurityGroups() security_group_data = security_group_create_args or {} scenario._create_security_group = mock.Mock() scenario._delete_security_group = mock.Mock() scenario.run(security_group_create_args=security_group_create_args) scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._delete_security_group.assert_called_once_with( scenario._create_security_group.return_value) @ddt.data( {}, {"security_group_create_args": {}}, {"security_group_create_args": {"description": "fake-description"}}, {"security_group_update_args": {}}, {"security_group_update_args": {"description": "fake-updated-descr"}}, ) @ddt.unpack def test_create_and_update_security_groups( self, security_group_create_args=None, security_group_update_args=None): scenario = security_groups.CreateAndUpdateSecurityGroups() security_group_data = 
security_group_create_args or {} security_group_update_data = security_group_update_args or {} scenario._create_security_group = mock.Mock() scenario._update_security_group = mock.Mock() scenario.run(security_group_create_args=security_group_create_args, security_group_update_args=security_group_update_args) scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._update_security_group.assert_called_once_with( scenario._create_security_group.return_value, **security_group_update_data) @ddt.data( {}, {"security_group_args": {}}, {"security_group_args": {"description": "fake-description"}}, {"security_group_rule_args": {}}, {"security_group_rule_args": {"description": "fake-rule-descr"}}, {"security_group_rules_count": 2}, ) @ddt.unpack def test_create_and_list_security_group_rules( self, security_group_rules_count=1, security_group_args=None, security_group_rule_args=None): scenario = security_groups.CreateAndListSecurityGroupRules() security_group_data = security_group_args or {} security_group_rule_data = security_group_rule_args or {} security_group = mock.MagicMock() scenario._create_security_group = mock.MagicMock() scenario._create_security_group_rule = mock.MagicMock() scenario._list_security_group_rules = mock.MagicMock() # Positive case scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.side_effect = [ {"security_group_rule": {"id": 1, "name": "f1", "port_range_min": 1, "port_range_max": 1}}, {"security_group_rule": {"id": 2, "name": "f2", "port_range_min": 2, "port_range_max": 2}}] scenario._list_security_group_rules.return_value = { "security_group_rules": [{"id": 1, "name": "f1", "port_range_min": 1, "port_range_max": 1}, {"id": 2, "name": "f2", "port_range_min": 2, "port_range_max": 2}, {"id": 3, "name": "f3", "port_range_min": 3, "port_range_max": 3}]} scenario.run(security_group_rules_count, security_group_args=security_group_data, 
security_group_rule_args=security_group_rule_data) scenario._create_security_group.assert_called_once_with( **security_group_data) calls = [] for i in range(security_group_rules_count): security_group_rule_data["port_range_min"] = i + 1 security_group_rule_data["port_range_max"] = i + 1 calls.append(mock.call(security_group["security_group"]["id"], **security_group_rule_data)) scenario._create_security_group_rule.assert_has_calls(calls) scenario._list_security_group_rules.assert_called_once_with() @ddt.data( {}, {"security_group_args": {}}, {"security_group_args": {"description": "fake-description"}}, {"security_group_rule_args": {}}, {"security_group_rule_args": {"description": "fake-rule-descr"}}, {"security_group_rules_count": 2}, ) @ddt.unpack def test_create_and_list_security_group_rules_with_fails( self, security_group_rules_count=1, security_group_args=None, security_group_rule_args=None): scenario = security_groups.CreateAndListSecurityGroupRules() security_group_data = security_group_args or {} security_group_rule_data = security_group_rule_args or {} rule_expected = copy.deepcopy(security_group_rule_data) security_group = mock.MagicMock() security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} scenario._create_security_group = mock.MagicMock() scenario._create_security_group_rule = mock.MagicMock() scenario._list_security_group_rules = mock.MagicMock() scenario._create_security_group_rule.return_value = security_group_rule scenario._list_security_group_rules.return_value = { "security_group_rules": [{"id": 1, "name": "f1"}, {"id": 2, "name": "f2"}, {"id": 3, "name": "f3"}]} # Negative case1: security_group isn't created scenario._create_security_group.return_value = None self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_rules_count, security_group_data, security_group_rule_data) scenario._create_security_group.assert_called_with( **security_group_data) # Negative case2: security_group_rule isn't created 
scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.return_value = None self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_rules_count, security_group_data, security_group_rule_data) scenario._create_security_group.assert_called_with( **security_group_data) rule_expected["port_range_min"] = 1 rule_expected["port_range_max"] = 1 scenario._create_security_group_rule.assert_called_with( security_group["security_group"]["id"], **rule_expected) # Negative case3: security_group_rule isn't listed scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.reset_mock() scenario._create_security_group_rule.return_value = mock.MagicMock() self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_rules_count, security_group_data, security_group_rule_data) scenario._create_security_group.assert_called_with( **security_group_data) calls = [] for i in range(security_group_rules_count): rule_expected["port_range_min"] = i + 1 rule_expected["port_range_max"] = i + 1 calls.append(mock.call(security_group["security_group"]["id"], **rule_expected)) scenario._create_security_group_rule.assert_has_calls(calls, any_order=True) scenario._list_security_group_rules.assert_called_with() @ddt.data( {}, {"security_group_args": {}}, {"security_group_args": {"description": "fake-description"}}, {"security_group_rule_args": {}}, {"security_group_rule_args": {"description": "fake-rule-descr"}} ) @ddt.unpack def test_create_and_show_security_group_rule( self, security_group_args=None, security_group_rule_args=None): scenario = security_groups.CreateAndShowSecurityGroupRule() security_group_data = security_group_args or {} security_group_rule_data = security_group_rule_args or {} security_group = mock.MagicMock() security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} scenario._create_security_group = mock.MagicMock() 
scenario._create_security_group_rule = mock.MagicMock() scenario._show_security_group_rule = mock.MagicMock() # Positive case scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.return_value = security_group_rule scenario.run(security_group_args=security_group_data, security_group_rule_args=security_group_rule_data) scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._create_security_group_rule.assert_called_once_with( security_group["security_group"]["id"], **security_group_rule_data) scenario._show_security_group_rule.assert_called_once_with( security_group_rule["security_group_rule"]["id"]) @ddt.data( {}, {"security_group_args": {}}, {"security_group_args": {"description": "fake-description"}}, {"security_group_rule_args": {}}, {"security_group_rule_args": {"description": "fake-rule-descr"}} ) @ddt.unpack def test_create_and_delete_security_group_rule( self, security_group_args=None, security_group_rule_args=None): scenario = security_groups.CreateAndDeleteSecurityGroupRule() security_group_data = security_group_args or {} security_group_rule_data = security_group_rule_args or {} security_group = mock.MagicMock() security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} scenario._create_security_group = mock.MagicMock() scenario._create_security_group_rule = mock.MagicMock() scenario._delete_security_group_rule = mock.MagicMock() scenario._delete_security_group = mock.MagicMock() # Positive case scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.return_value = security_group_rule scenario.run(security_group_args=security_group_data, security_group_rule_args=security_group_rule_data) scenario._create_security_group.assert_called_once_with( **security_group_data) scenario._create_security_group_rule.assert_called_once_with( security_group["security_group"]["id"], **security_group_rule_data) 
scenario._delete_security_group_rule.assert_called_once_with( security_group_rule["security_group_rule"]["id"]) scenario._delete_security_group.assert_called_once_with( security_group) @ddt.data( {}, {"security_group_args": {}}, {"security_group_args": {"description": "fake-description"}}, {"security_group_rule_args": {}}, {"security_group_rule_args": {"description": "fake-rule-descr"}}, ) @ddt.unpack def test_create_and_show_security_group_rule_with_fails( self, security_group_args=None, security_group_rule_args=None): scenario = security_groups.CreateAndShowSecurityGroupRule() security_group_data = security_group_args or {} security_group_rule_data = security_group_rule_args or {} security_group = mock.MagicMock() security_group_rule = {"security_group_rule": {"id": 1, "name": "f1"}} scenario._create_security_group = mock.MagicMock() scenario._create_security_group_rule = mock.MagicMock() scenario._show_security_group_rule = mock.MagicMock() scenario._create_security_group_rule.return_value = security_group_rule # Negative case1: security_group isn't created scenario._create_security_group.return_value = None self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_data, security_group_rule_data) scenario._create_security_group.assert_called_with( **security_group_data) # Negative case2: security_group_rule isn't created scenario._create_security_group.return_value = security_group scenario._create_security_group_rule.return_value = None self.assertRaises(rally_exceptions.RallyAssertionError, scenario.run, security_group_data, security_group_rule_data) scenario._create_security_group.assert_called_with( **security_group_data) scenario._create_security_group_rule.assert_called_with( security_group["security_group"]["id"], **security_group_rule_data)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,757
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/monasca/metrics.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import utils as rutils
from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.task import context
from rally_openstack.task.scenarios.monasca import utils as monasca_utils


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="monasca_metrics", platform="openstack", order=510)
class MonascaMetricGenerator(context.OpenStackContext):
    """Creates Monasca Metrics.

    Pre-populates Monasca with ``metrics_per_tenant`` metrics for every
    tenant in the deployment so that list/query scenarios have data to
    operate on.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "name": {
                "type": "string"
            },
            # Optional fixed dimensions attached to every created metric.
            "dimensions": {
                "type": "object",
                "properties": {
                    "region": {
                        "type": "string"
                    },
                    "service": {
                        "type": "string"
                    },
                    "hostname": {
                        "type": "string"
                    },
                    "url": {
                        "type": "string"
                    }
                },
                "additionalProperties": False
            },
            "metrics_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "value_meta": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "value_meta_key": {
                            "type": "string"
                        },
                        "value_meta_value": {
                            "type": "string"
                        }
                    },
                    "additionalProperties": False
                }
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "metrics_per_tenant": 2
    }

    def setup(self):
        """Create ``metrics_per_tenant`` metrics for each tenant."""
        new_metric = {}

        # Only forward "dimensions" when the user configured it; an empty
        # kwargs dict lets the scenario fall back to its own defaults.
        if "dimensions" in self.config:
            new_metric = {
                "dimensions": self.config["dimensions"]
            }

        for user, tenant_id in self._iterate_per_tenants():
            scenario = monasca_utils.MonascaScenario(
                context={"user": user, "task": self.context["task"]}
            )
            for i in range(self.config["metrics_per_tenant"]):
                scenario._create_metrics(**new_metric)
                # Tiny pause between submissions so each metric gets a
                # distinct timestamp.
                rutils.interruptable_sleep(0.001)
        # Give Monasca time to make the freshly pushed metrics queryable
        # before the workload starts polling.
        rutils.interruptable_sleep(
            monasca_utils.CONF.openstack.monasca_metric_create_prepoll_delay,
            atomic_delay=1)

    def cleanup(self):
        # We don't have API for removal of metrics
        pass
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,758
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/watcher/test_audit_templates.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.contexts.watcher import audit_templates
from rally_openstack.task.scenarios.watcher import utils as watcher_utils
from tests.unit import test

# Module paths used to build mock.patch targets.
CTX = "rally_openstack.task.contexts.watcher"
SCN = "rally_openstack.task.scenarios.watcher"
TYP = "rally_openstack.task.types"


class AuditTemplateTestCase(test.ScenarioTestCase):
    """Unit tests for the watcher audit_templates context."""

    @mock.patch("%s.utils.WatcherScenario._create_audit_template" % SCN,
                return_value=mock.MagicMock())
    @mock.patch("%s.WatcherStrategy" % TYP,)
    @mock.patch("%s.WatcherGoal" % TYP)
    def test_setup(self, mock_watcher_goal, mock_watcher_strategy,
                   mock_watcher_scenario__create_audit_template):
        # setup() should resolve goal/strategy names through the type
        # pre-processors and create one audit template per admin.
        users = [{"id": 1, "tenant_id": 1, "credential": mock.MagicMock()}]
        self.context.update({
            "config": {
                "audit_templates": {
                    "audit_templates_per_admin": 1,
                    "fill_strategy": "random",
                    "params": [
                        {
                            "goal": {
                                "name": "workload_balancing"
                            },
                            "strategy": {
                                "name": "workload_stabilization"
                            }
                        },
                        {
                            "goal": {
                                "name": "workload_balancing"
                            },
                            "strategy": {
                                "name": "workload_stabilization"
                            }
                        }
                    ]
                },
            },
            "admin": {
                "credential": mock.MagicMock()
            },
            "users": users
        })
        audit_template = audit_templates.AuditTemplateGenerator(self.context)
        audit_template.setup()
        # The IDs passed to _create_audit_template must be the values
        # returned by the goal/strategy pre-processors.
        goal_id = mock_watcher_goal.return_value.pre_process.return_value
        strategy_id = (
            mock_watcher_strategy.return_value.pre_process.return_value)
        mock_calls = [mock.call(goal_id, strategy_id)]
        mock_watcher_scenario__create_audit_template.assert_has_calls(
            mock_calls)

    @mock.patch("%s.audit_templates.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        # cleanup() must delegate to the generic resource manager for both
        # action plans and audit templates owned by this task.
        audit_templates_mocks = [mock.Mock() for i in range(2)]
        self.context.update({
            "admin": {
                "credential": mock.MagicMock()
            },
            "audit_templates": audit_templates_mocks
        })
        audit_templates_ctx = audit_templates.AuditTemplateGenerator(
            self.context)
        audit_templates_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["watcher.action_plan", "watcher.audit_template"],
            admin=self.context["admin"],
            superclass=watcher_utils.WatcherScenario,
            task_id=self.context["owner_id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,759
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/sahara/clusters.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.task import types
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.sahara import utils

LOG = logging.getLogger(__name__)

"""Scenarios for Sahara clusters."""


@types.convert(flavor={"type": "nova_flavor"},
               master_flavor={"type": "nova_flavor"},
               worker_flavor={"type": "nova_flavor"},
               neutron_net={"type": "neutron_network"},
               floating_ip_pool={"type": "neutron_network"})
@validation.add("flavor_exists", param_name="master_flavor")
@validation.add("flavor_exists", param_name="worker_flavor")
@validation.add("required_contexts", contexts=["users", "sahara_image"])
@validation.add("number", param_name="workers_count", minval=1,
                integer_only=True)
@validation.add("required_services", services=[consts.Service.SAHARA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["sahara"]},
                    name="SaharaClusters.create_and_delete_cluster",
                    platform="openstack")
class CreateAndDeleteCluster(utils.SaharaScenario):

    def run(self, workers_count, plugin_name, hadoop_version,
            master_flavor=None, worker_flavor=None, flavor=None,
            floating_ip_pool=None, volumes_per_node=None,
            volumes_size=None, auto_security_group=None,
            security_groups=None, node_configs=None,
            cluster_configs=None, enable_anti_affinity=False,
            enable_proxy=False, use_autoconfig=True):
        """Launch and delete a Sahara Cluster.

        This scenario launches a Hadoop cluster, waits until it becomes
        'Active' and deletes it.

        :param flavor: Nova flavor that will be used for nodes in the
                       created node groups. Deprecated.
        :param master_flavor: Nova flavor that will be used for the master
                              instance of the cluster
        :param worker_flavor: Nova flavor that will be used for the workers of
                              the cluster
        :param workers_count: number of worker instances in a cluster
        :param plugin_name: name of a provisioning plugin
        :param hadoop_version: version of Hadoop distribution supported by
                               the specified plugin.
        :param floating_ip_pool: floating ip pool name from which Floating
                                 IPs will be allocated. Sahara will determine
                                 automatically how to treat this depending on
                                 its own configurations. Defaults to None
                                 because in some cases Sahara may work w/o
                                 Floating IPs.
        :param volumes_per_node: number of Cinder volumes that will be
                                 attached to every cluster node
        :param volumes_size: size of each Cinder volume in GB
        :param auto_security_group: boolean value. If set to True Sahara will
                                    create a Security Group for each Node
                                    Group in the Cluster automatically.
        :param security_groups: list of security groups that will be used
                                while creating VMs. If auto_security_group
                                is set to True, this list can be left empty.
        :param node_configs: config dict that will be passed to each Node
                             Group
        :param cluster_configs: config dict that will be passed to the
                                Cluster
        :param enable_anti_affinity: If set to true the vms will be scheduled
                                     one per compute node.
        :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                             do not assign floating ips to workers.
        :param use_autoconfig: If True, instances of the node group will be
                               automatically configured during cluster
                               creation. If False, the configuration values
                               should be specified manually
        """
        # The image to boot cluster nodes from is pre-created by the
        # sahara_image context and exposed per-tenant.
        image_id = self.context["tenant"]["sahara"]["image"]

        LOG.debug("Using Image: %s" % image_id)

        cluster = self._launch_cluster(
            flavor_id=flavor,
            master_flavor_id=master_flavor,
            worker_flavor_id=worker_flavor,
            image_id=image_id,
            workers_count=workers_count,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            floating_ip_pool=floating_ip_pool,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            auto_security_group=auto_security_group,
            security_groups=security_groups,
            node_configs=node_configs,
            cluster_configs=cluster_configs,
            enable_anti_affinity=enable_anti_affinity,
            enable_proxy=enable_proxy,
            use_autoconfig=use_autoconfig)

        self._delete_cluster(cluster)


@types.convert(flavor={"type": "nova_flavor"},
               master_flavor={"type": "nova_flavor"},
               worker_flavor={"type": "nova_flavor"})
@validation.add("flavor_exists", param_name="master_flavor")
@validation.add("flavor_exists", param_name="worker_flavor")
@validation.add("required_services", services=[consts.Service.SAHARA])
@validation.add("required_contexts", contexts=["users", "sahara_image"])
@validation.add("number", param_name="workers_count", minval=1,
                integer_only=True)
@scenario.configure(context={"cleanup@openstack": ["sahara"]},
                    name="SaharaClusters.create_scale_delete_cluster",
                    platform="openstack")
class CreateScaleDeleteCluster(utils.SaharaScenario):

    def run(self, master_flavor, worker_flavor, workers_count, plugin_name,
            hadoop_version, deltas, flavor=None, floating_ip_pool=None,
            volumes_per_node=None, volumes_size=None,
            auto_security_group=None, security_groups=None,
            node_configs=None, cluster_configs=None,
            enable_anti_affinity=False, enable_proxy=False,
            use_autoconfig=True):
        """Launch, scale and delete a Sahara Cluster.

        This scenario launches a Hadoop cluster, waits until it becomes
        'Active'. Then a series of scale operations is applied. The scaling
        happens according to numbers listed in :param deltas. Ex. if deltas
        is set to [2, -2] it means that the first scaling operation will add
        2 worker nodes to the cluster and the second will remove two.

        :param flavor: Nova flavor that will be used for nodes in the
                       created node groups. Deprecated.
        :param master_flavor: Nova flavor that will be used for the master
                              instance of the cluster
        :param worker_flavor: Nova flavor that will be used for the workers of
                              the cluster
        :param workers_count: number of worker instances in a cluster
        :param plugin_name: name of a provisioning plugin
        :param hadoop_version: version of Hadoop distribution supported by
                               the specified plugin.
        :param deltas: list of integers which will be used to add or
                       remove worker nodes from the cluster
        :param floating_ip_pool: floating ip pool name from which Floating
                                 IPs will be allocated. Sahara will determine
                                 automatically how to treat this depending on
                                 its own configurations. Defaults to None
                                 because in some cases Sahara may work w/o
                                 Floating IPs.
        :param neutron_net_id: id of a Neutron network that will be used
                               for fixed IPs. This parameter is ignored when
                               Nova Network is set up.
        :param volumes_per_node: number of Cinder volumes that will be
                                 attached to every cluster node
        :param volumes_size: size of each Cinder volume in GB
        :param auto_security_group: boolean value. If set to True Sahara will
                                    create a Security Group for each Node
                                    Group in the Cluster automatically.
        :param security_groups: list of security groups that will be used
                                while creating VMs. If auto_security_group
                                is set to True this list can be left empty.
        :param node_configs: configs dict that will be passed to each Node
                             Group
        :param cluster_configs: configs dict that will be passed to the
                                Cluster
        :param enable_anti_affinity: If set to true the vms will be scheduled
                                     one per compute node.
        :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                             do not assign floating ips to workers.
        :param use_autoconfig: If True, instances of the node group will be
                               automatically configured during cluster
                               creation. If False, the configuration values
                               should be specified manually
        """
        image_id = self.context["tenant"]["sahara"]["image"]

        LOG.debug("Using Image: %s" % image_id)

        cluster = self._launch_cluster(
            flavor_id=flavor,
            master_flavor_id=master_flavor,
            worker_flavor_id=worker_flavor,
            image_id=image_id,
            workers_count=workers_count,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            floating_ip_pool=floating_ip_pool,
            volumes_per_node=volumes_per_node,
            volumes_size=volumes_size,
            auto_security_group=auto_security_group,
            security_groups=security_groups,
            node_configs=node_configs,
            cluster_configs=cluster_configs,
            enable_anti_affinity=enable_anti_affinity,
            enable_proxy=enable_proxy,
            use_autoconfig=use_autoconfig)

        for delta in deltas:
            # The Cluster is fetched every time so that its node groups have
            # correct 'count' values.
            cluster = self.clients("sahara").clusters.get(cluster.id)

            if delta == 0:
                # Zero scaling makes no sense.
                continue
            elif delta > 0:
                self._scale_cluster_up(cluster, delta)
            elif delta < 0:
                self._scale_cluster_down(cluster, delta)

        self._delete_cluster(cluster)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,760
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/nova/flavors.py
# Copyright 2015: Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils

"""Scenarios for Nova flavors."""

LOG = logging.getLogger(__name__)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="NovaFlavors.list_flavors", platform="openstack")
class ListFlavors(utils.NovaScenario):

    def run(self, detailed=True, is_public=True, marker=None, min_disk=None,
            min_ram=None, limit=None, sort_key=None, sort_dir=None):
        """List all flavors.

        Measure the "nova flavor-list" command performance.

        :param detailed: Whether flavor needs to be return with details
                         (optional).
        :param is_public: Filter flavors with provided access type (optional).
                          None means give all flavors and only admin has query
                          access to all flavor types.
        :param marker: Begin returning flavors that appear later in the flavor
                       list than that represented by this flavor id (optional).
        :param min_disk: Filters the flavors by a minimum disk space, in GiB.
        :param min_ram: Filters the flavors by a minimum RAM, in MB.
        :param limit: maximum number of flavors to return (optional).
        :param sort_key: Flavors list sort key (optional).
        :param sort_dir: Flavors list sort direction (optional).
        """
        self._list_flavors(detailed=detailed, is_public=is_public,
                           marker=marker, min_disk=min_disk, min_ram=min_ram,
                           limit=limit, sort_key=sort_key, sort_dir=sort_dir)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_and_list_flavor_access",
                    platform="openstack")
class CreateAndListFlavorAccess(utils.NovaScenario):

    def run(self, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0,
            rxtx_factor=1.0, is_public=True):
        """Create a non-public flavor and list its access rules

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        # NOTE(pirsriva): access rules can be listed
        # only for non-public flavors
        if is_public:
            LOG.warning("is_public cannot be set to True for listing "
                        "flavor access rules. Setting is_public to False")
            is_public = False
        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                                     ephemeral=ephemeral, swap=swap,
                                     rxtx_factor=rxtx_factor,
                                     is_public=is_public)
        self.assertTrue(flavor)

        self._list_flavor_access(flavor.id)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_flavor_and_add_tenant_access",
                    platform="openstack")
class CreateFlavorAndAddTenantAccess(utils.NovaScenario):

    def run(self, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0,
            rxtx_factor=1.0, is_public=True):
        """Create a flavor and Add flavor access for the given tenant.

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                                     ephemeral=ephemeral, swap=swap,
                                     rxtx_factor=rxtx_factor,
                                     is_public=is_public)
        self.assertTrue(flavor)
        # Grant the current tenant access to the freshly created flavor.
        self._add_tenant_access(flavor.id, self.context["tenant"]["id"])


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_flavor",
                    platform="openstack")
class CreateFlavor(utils.NovaScenario):

    def run(self, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0,
            rxtx_factor=1.0, is_public=True):
        """Create a flavor.

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                            ephemeral=ephemeral, swap=swap,
                            rxtx_factor=rxtx_factor,
                            is_public=is_public)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_and_get_flavor",
                    platform="openstack")
class CreateAndGetFlavor(utils.NovaScenario):
    """Scenario for create and get flavor."""

    def run(self, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0,
            rxtx_factor=1.0, is_public=True):
        """Create flavor and get detailed information of the flavor.

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                                     ephemeral=ephemeral, swap=swap,
                                     rxtx_factor=rxtx_factor,
                                     is_public=is_public)
        self._get_flavor(flavor.id)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_and_delete_flavor",
                    platform="openstack")
class CreateAndDeleteFlavor(utils.NovaScenario):

    def run(self, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0,
            rxtx_factor=1.0, is_public=True):
        """Create flavor and delete the flavor.

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                                     ephemeral=ephemeral, swap=swap,
                                     rxtx_factor=rxtx_factor,
                                     is_public=is_public)

        self._delete_flavor(flavor.id)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova"]},
                    name="NovaFlavors.create_flavor_and_set_keys",
                    platform="openstack")
class CreateFlavorAndSetKeys(utils.NovaScenario):

    def run(self, ram, vcpus, disk, extra_specs, flavorid="auto",
            ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
        """Create flavor and set keys to the flavor.

        Measure the "nova flavor-key" command performance.
        the scenario first create a flavor,then add the extra specs to it.

        :param ram: Memory in MB for the flavor
        :param vcpus: Number of VCPUs for the flavor
        :param disk: Size of local disk in GB
        :param extra_specs: additional arguments for flavor set keys
        :param flavorid: ID for the flavor (optional). You can use the
                         reserved value ``"auto"`` to have Nova generate a
                         UUID for the flavor in cases where you cannot simply
                         pass ``None``.
        :param ephemeral: Ephemeral space size in GB (default 0).
        :param swap: Swap space in MB
        :param rxtx_factor: RX/TX factor
        :param is_public: Make flavor accessible to the public (default true).
        """
        flavor = self._create_flavor(ram, vcpus, disk, flavorid=flavorid,
                                     ephemeral=ephemeral, swap=swap,
                                     rxtx_factor=rxtx_factor,
                                     is_public=is_public)
        self._set_flavor_keys(flavor, extra_specs)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,761
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/ironic/nodes.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.ironic import utils

"""Scenarios for ironic nodes."""


@logging.log_deprecated_args("Useless arguments detected", "0.10.0",
                             ("marker", "limit", "sort_key"), once=True)
@validation.add("required_services", services=[consts.Service.IRONIC])
@validation.add("restricted_parameters", param_names="name")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["ironic"]},
                    name="IronicNodes.create_and_list_node",
                    platform="openstack")
class CreateAndListNode(utils.IronicScenario):

    def run(self, driver, properties=None, associated=None, maintenance=None,
            detail=False, sort_dir=None, marker=None, limit=None,
            sort_key=None, **kwargs):
        """Create and list nodes.

        :param driver: The name of the driver used to manage this Node.
        :param properties: Key/value pair describing the physical
            characteristics of the node.
        :param associated: Optional argument of list request. Either a Boolean
            or a string representation of a Boolean that indicates whether to
            return a list of associated (True or "True") or unassociated
            (False or "False") nodes.
        :param maintenance: Optional argument of list request. Either a
            Boolean or a string representation of a Boolean that indicates
            whether to return nodes in maintenance mode (True or "True"), or
            not in maintenance mode (False or "False").
        :param detail: Optional, boolean whether to return detailed
            information about nodes.
        :param sort_dir: Optional, direction of sorting, either 'asc' (the
            default) or 'desc'.
        :param marker: DEPRECATED since Rally 0.10.0
        :param limit: DEPRECATED since Rally 0.10.0
        :param sort_key: DEPRECATED since Rally 0.10.0
        :param kwargs: Optional additional arguments for node creation
        """
        node = self._create_node(driver, properties, **kwargs)
        list_nodes = self._list_nodes(
            associated=associated, maintenance=maintenance, detail=detail,
            sort_dir=sort_dir)
        # Verify the freshly created node shows up in the listing.
        self.assertIn(node.name, [n.name for n in list_nodes])


@validation.add("required_services", services=[consts.Service.IRONIC])
@validation.add("restricted_parameters", param_names="name")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["ironic"]},
                    name="IronicNodes.create_and_delete_node",
                    platform="openstack")
class CreateAndDeleteNode(utils.IronicScenario):

    def run(self, driver, properties=None, **kwargs):
        """Create and delete node.

        :param driver: The name of the driver used to manage this Node.
        :param properties: Key/value pair describing the physical
            characteristics of the node.
        :param kwargs: Optional additional arguments for node creation
        """
        node = self._create_node(driver, properties, **kwargs)
        self._delete_node(node)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,762
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/swift/utils.py
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import tempfile

from rally.common import broker

from rally_openstack.task.scenarios.swift import utils as swift_utils


class SwiftObjectMixin(object):
    """Mix-in method for Swift Object Context.

    All four helpers follow the same publish/consume broker pattern: a
    publisher enqueues work items and a pool of consumer threads processes
    them, reusing one SwiftScenario instance per user (keyed by user id in
    the consumer-local ``cache``).
    """

    def _create_containers(self, containers_per_tenant, threads):
        """Create containers and store results in Rally context.

        :param containers_per_tenant: int, number of containers to create
                                      per tenant
        :param threads: int, number of threads to use for broker pattern

        :returns: list of tuples containing (account, container)
        """
        containers = []

        def publish(queue):
            for user, tenant_id in self._iterate_per_tenants():
                # Initialize the per-tenant container list that consumers
                # will append into.
                self.context["tenants"][tenant_id]["containers"] = []
                for i in range(containers_per_tenant):
                    args = (user,
                            self.context["tenants"][tenant_id]["containers"])
                    queue.append(args)

        def consume(cache, args):
            user, tenant_containers = args
            if user["id"] not in cache:
                # One scenario (and thus one client) per user, shared
                # across all of that user's work items.
                cache[user["id"]] = swift_utils.SwiftScenario(
                    {"user": user, "task": self.context.get("task", {})})
            container_name = cache[user["id"]]._create_container()
            tenant_containers.append({"user": user,
                                      "container": container_name,
                                      "objects": []})
            containers.append((user["tenant_id"], container_name))

        broker.run(publish, consume, threads)

        return containers

    def _create_objects(self, objects_per_container, object_size, threads):
        """Create objects and store results in Rally context.

        :param objects_per_container: int, number of objects to create per
                                      container
        :param object_size: int, size of created swift objects in byte
        :param threads: int, number of threads to use for broker pattern

        :returns: list of tuples containing (account, container, object)
        """
        objects = []

        with tempfile.TemporaryFile() as dummy_file:
            # set dummy file to specified object size
            dummy_file.truncate(object_size)

            def publish(queue):
                for tenant_id in self.context["tenants"]:
                    items = self.context["tenants"][tenant_id]["containers"]
                    for container in items:
                        for i in range(objects_per_container):
                            queue.append(container)

            def consume(cache, container):
                user = container["user"]
                if user["id"] not in cache:
                    cache[user["id"]] = swift_utils.SwiftScenario(
                        {"user": user,
                         "task": self.context.get("task", {})})
                # Rewind so every upload sends the full dummy payload.
                dummy_file.seek(0)
                object_name = cache[user["id"]]._upload_object(
                    container["container"], dummy_file)[1]
                container["objects"].append(object_name)
                objects.append((user["tenant_id"], container["container"],
                                object_name))

            broker.run(publish, consume, threads)

        return objects

    def _delete_containers(self, threads):
        """Delete containers created by Swift context and update Rally context.

        :param threads: int, number of threads to use for broker pattern
        """
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                containers = self.context["tenants"][tenant_id]["containers"]
                # Iterate over a snapshot: consumers remove entries from the
                # live list while this loop runs.
                for container in containers[:]:
                    args = container, containers
                    queue.append(args)

        def consume(cache, args):
            container, tenant_containers = args
            user = container["user"]
            if user["id"] not in cache:
                cache[user["id"]] = swift_utils.SwiftScenario(
                    {"user": user, "task": self.context.get("task", {})})
            cache[user["id"]]._delete_container(container["container"])
            # Keep the context in sync with what actually exists.
            tenant_containers.remove(container)

        broker.run(publish, consume, threads)

    def _delete_objects(self, threads):
        """Delete objects created by Swift context and update Rally context.

        :param threads: int, number of threads to use for broker pattern
        """
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                containers = self.context["tenants"][tenant_id]["containers"]
                for container in containers:
                    # Snapshot the object list; consumers mutate it.
                    for object_name in container["objects"][:]:
                        args = object_name, container
                        queue.append(args)

        def consume(cache, args):
            object_name, container = args
            user = container["user"]
            if user["id"] not in cache:
                cache[user["id"]] = swift_utils.SwiftScenario(
                    {"user": user, "task": self.context.get("task", {})})
            cache[user["id"]]._delete_object(container["container"],
                                             object_name)
            container["objects"].remove(object_name)

        broker.run(publish, consume, threads)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,763
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/cinder/volume_types.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.common import utils
from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.storage import block
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context


LOG = logging.getLogger(__name__)


@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="volume_types", platform="openstack", order=410)
class VolumeTypeGenerator(context.OpenStackContext):
    """Adds cinder volumes types."""

    # The context config is a plain list of volume-type names to create.
    CONFIG_SCHEMA = {
        "type": "array",
        "$schema": consts.JSON_SCHEMA,
        "items": {"type": "string"}
    }

    def setup(self):
        """Create every volume type named in the context config.

        Each created type is recorded in ``self.context["volume_types"]``
        as ``{"id": ..., "name": ...}`` for scenarios to consume.
        """
        admin_credential = self.context.get("admin", {}).get("credential")
        storage = block.BlockStorage(
            osclients.Clients(admin_credential),
            name_generator=self.generate_random_name,
            atomic_inst=self.atomic_actions())

        self.context["volume_types"] = []
        for type_name in self.config:
            LOG.debug("Creating Cinder volume type %s" % type_name)
            created = storage.create_volume_type(type_name)
            self.context["volume_types"].append({"id": created.id,
                                                 "name": type_name})

    def cleanup(self):
        """Delete the volume types that were created during setup."""
        matcher = utils.make_name_matcher(*self.config)
        resource_manager.cleanup(
            names=["cinder.volume_types"],
            admin=self.context["admin"],
            superclass=matcher,
            task_id=self.get_owner_id())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,764
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/manila/utils.py
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from rally.common import cfg
from rally import exceptions
from rally.task import atomic
from rally.task import utils

from rally_openstack.task.contexts.manila import consts
from rally_openstack.task import scenario


CONF = cfg.CONF


class ManilaScenario(scenario.OpenStackScenario):
    """Base class for Manila scenarios with basic atomic actions."""

    @atomic.action_timer("manila.create_share")
    def _create_share(self, share_proto, size=1, **kwargs):
        """Create a share.

        :param share_proto: share protocol for new share,
            available values are NFS, CIFS, GlusterFS, HDFS and CEPHFS.
        :param size: size of a share in GB
        :param snapshot_id: ID of the snapshot
        :param name: name of new share
        :param description: description of a share
        :param metadata: optional metadata to set on share creation
        :param share_network: either instance of ShareNetwork or str with ID
        :param share_type: either instance of ShareType or str with ID
        :param is_public: defines whether to set share as public or not.
        :returns: instance of :class:`Share`
        """
        if self.context:
            # Spread shares evenly over the share networks prepared by the
            # manila_share_networks context (one per iteration, round-robin),
            # unless the caller pinned a share_network explicitly.
            share_networks = self.context.get("tenant", {}).get(
                consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get(
                    "share_networks", [])
            if share_networks and not kwargs.get("share_network"):
                kwargs["share_network"] = share_networks[
                    self.context["iteration"] % len(share_networks)]["id"]

        if not kwargs.get("name"):
            kwargs["name"] = self.generate_random_name()

        share = self.clients("manila").shares.create(
            share_proto, size, **kwargs)

        # Give the backend a head start before polling for "available".
        self.sleep_between(CONF.openstack.manila_share_create_prepoll_delay)
        share = utils.wait_for_status(
            share,
            ready_statuses=["available"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_create_timeout,
            check_interval=CONF.openstack.manila_share_create_poll_interval,
        )
        return share

    @atomic.action_timer("manila.delete_share")
    def _delete_share(self, share):
        """Delete the given share.

        :param share: :class:`Share`
        """
        share.delete()
        # "error_deleting" means the backend failed the delete; treat it as a
        # terminal error rather than waiting out the full timeout.
        error_statuses = ("error_deleting", )
        utils.wait_for_status(
            share,
            ready_statuses=["deleted"],
            check_deletion=True,
            update_resource=utils.get_from_manager(error_statuses),
            timeout=CONF.openstack.manila_share_delete_timeout,
            check_interval=CONF.openstack.manila_share_delete_poll_interval)

    def _export_location(self, share):
        """Export share location.

        :param share: :class:`Share`
        :returns: the share's export_locations attribute
        """
        location = share.export_locations
        return location

    def _get_access_from_share(self, share, access_id):
        """Get access from share

        :param share: :class: `Share`
        :param access_id: The id of the access we want to get
        :returns: The access object from the share
        :raises GetResourceNotFound: if the access is not in the share
        """
        try:
            return next(access for access in share.access_list()
                        if access.id == access_id)
        except StopIteration:
            raise exceptions.GetResourceNotFound(resource=access_id)

    def _update_resource_in_allow_access_share(self, share, access_id):
        """Helper to update resource state in allow_access_share method

        :param share: :class:`Share`
        :param access_id: id of the access
        :returns: A function to be used in wait_for_status for the update
            resource
        """
        # wait_for_status passes the stale resource to this callable; we
        # ignore it and always re-fetch the access from the share.
        def _is_created(_):
            return self._get_access_from_share(share, access_id)

        return _is_created

    @atomic.action_timer("manila.access_allow_share")
    def _allow_access_share(self, share, access_type, access, access_level):
        """Allow access to a share

        :param share: :class:`Share`
        :param access_type: represents the access type (e.g: 'ip',
            'domain'...)
        :param access: represents the object (e.g: '127.0.0.1'...)
        :param access_level: access level to the share (e.g: 'rw', 'ro')
        """
        access_result = share.allow(access_type, access, access_level)
        # Get access from the list of accesses of the share
        access = next(access for access in share.access_list()
                      if access.id == access_result["id"])
        fn = self._update_resource_in_allow_access_share(share,
                                                         access_result["id"])
        # We check if the access in that access_list has the active state
        utils.wait_for_status(
            access,
            ready_statuses=["active"],
            update_resource=fn,
            check_interval=CONF.openstack.manila_access_create_poll_interval,
            timeout=CONF.openstack.manila_access_create_timeout)

        return access_result

    def _update_resource_in_deny_access_share(self, share, access_id):
        """Helper to update resource state in deny_access_share method

        :param share: :class:`Share`
        :param access_id: id of the access
        :returns: A function to be used in wait_for_status for the update
            resource
        """
        # Re-fetches the access; once it disappears from the share,
        # _get_access_from_share raises GetResourceNotFound, which
        # wait_for_status(check_deletion=True) treats as "deleted".
        def _is_deleted(_):
            access = self._get_access_from_share(share, access_id)
            return access

        return _is_deleted

    @atomic.action_timer("manila.access_deny_share")
    def _deny_access_share(self, share, access_id):
        """Deny access to a share

        :param share: :class:`Share`
        :param access_id: id of the access to delete
        """
        # Get the access element that was created in the first place
        access = self._get_access_from_share(share, access_id)
        share.deny(access_id)
        fn = self._update_resource_in_deny_access_share(share, access_id)
        utils.wait_for_status(
            access,
            ready_statuses=["deleted"],
            update_resource=fn,
            check_deletion=True,
            check_interval=CONF.openstack.manila_access_delete_poll_interval,
            timeout=CONF.openstack.manila_access_delete_timeout)

    @atomic.action_timer("manila.list_shares")
    def _list_shares(self, detailed=True, search_opts=None):
        """Returns user shares list.

        :param detailed: defines either to return detailed list of
            objects or not.
        :param search_opts: container of search opts such as
            "name", "host", "share_type", etc.
        """
        return self.clients("manila").shares.list(
            detailed=detailed, search_opts=search_opts)

    @atomic.action_timer("manila.extend_share")
    def _extend_share(self, share, new_size):
        """Extend the given share

        :param share: :class:`Share`
        :param new_size: new size of the share
        """
        self.clients("manila").shares.extend(share, new_size)
        utils.wait_for_status(
            share,
            ready_statuses=["available"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_create_timeout,
            check_interval=CONF.openstack.manila_share_create_poll_interval)

    @atomic.action_timer("manila.shrink_share")
    def _shrink_share(self, share, new_size):
        """Shrink the given share

        :param share: :class:`Share`
        :param new_size: new size of the share
        """
        share.shrink(new_size)
        utils.wait_for_status(
            share,
            ready_statuses=["available"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_create_timeout,
            check_interval=CONF.openstack.manila_share_create_poll_interval)

    @atomic.action_timer("manila.create_share_network")
    def _create_share_network(self, neutron_net_id=None,
                              neutron_subnet_id=None,
                              nova_net_id=None, description=None):
        """Create share network.

        :param neutron_net_id: ID of Neutron network
        :param neutron_subnet_id: ID of Neutron subnet
        :param nova_net_id: ID of Nova network
        :param description: share network description
        :returns: instance of :class:`ShareNetwork`
        """
        share_network = self.clients("manila").share_networks.create(
            neutron_net_id=neutron_net_id,
            neutron_subnet_id=neutron_subnet_id,
            nova_net_id=nova_net_id,
            name=self.generate_random_name(),
            description=description)
        return share_network

    @atomic.action_timer("manila.delete_share_network")
    def _delete_share_network(self, share_network):
        """Delete share network.

        :param share_network: instance of :class:`ShareNetwork`.
        """
        share_network.delete()
        utils.wait_for_status(
            share_network,
            ready_statuses=["deleted"],
            check_deletion=True,
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_delete_timeout,
            check_interval=CONF.openstack.manila_share_delete_poll_interval)

    @atomic.action_timer("manila.list_share_networks")
    def _list_share_networks(self, detailed=True, search_opts=None):
        """List share networks.

        :param detailed: defines either to return detailed list of
            objects or not.
        :param search_opts: container of search opts such as
            "project_id" and "name".
        :returns: list of instances of :class:`ShareNetwork`
        """
        share_networks = self.clients("manila").share_networks.list(
            detailed=detailed, search_opts=search_opts)
        return share_networks

    @atomic.action_timer("manila.list_share_servers")
    def _list_share_servers(self, search_opts=None):
        """List share servers. Admin only.

        :param search_opts: set of key-value pairs to filter share servers by.
            Example: {"share_network": "share_network_name_or_id"}
        :returns: list of instances of :class:`ShareServer`
        """
        share_servers = self.admin_clients("manila").share_servers.list(
            search_opts=search_opts)
        return share_servers

    @atomic.action_timer("manila.create_security_service")
    def _create_security_service(self, security_service_type, dns_ip=None,
                                 server=None, domain=None, user=None,
                                 password=None, description=None):
        """Create security service.

        'Security service' is data container in Manila that stores info
        about auth services 'Active Directory', 'Kerberos' and catalog
        service 'LDAP' that should be used for shares.

        :param security_service_type: security service type, permitted values
            are 'ldap', 'kerberos' or 'active_directory'.
        :param dns_ip: dns ip address used inside tenant's network
        :param server: security service server ip address or hostname
        :param domain: security service domain
        :param user: security identifier used by tenant
        :param password: password used by user
        :param description: security service description
        :returns: instance of :class:`SecurityService`
        """
        security_service = self.clients("manila").security_services.create(
            type=security_service_type,
            dns_ip=dns_ip,
            server=server,
            domain=domain,
            user=user,
            password=password,
            name=self.generate_random_name(),
            description=description)
        return security_service

    @atomic.action_timer("manila.delete_security_service")
    def _delete_security_service(self, security_service):
        """Delete security service.

        :param security_service: instance of :class:`SecurityService`.
        """
        security_service.delete()
        utils.wait_for_status(
            security_service,
            ready_statuses=["deleted"],
            check_deletion=True,
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.manila_share_delete_timeout,
            check_interval=CONF.openstack.manila_share_delete_poll_interval)

    @atomic.action_timer("manila.add_security_service_to_share_network")
    def _add_security_service_to_share_network(self, share_network,
                                               security_service):
        """Associate given security service with a share network.

        :param share_network: ID or instance of :class:`ShareNetwork`.
        :param security_service: ID or instance of :class:`SecurityService`.
        :returns: instance of :class:`ShareNetwork`.
        """
        share_network = self.clients(
            "manila").share_networks.add_security_service(
                share_network, security_service)
        return share_network

    @atomic.action_timer("manila.set_metadata")
    def _set_metadata(self, share, sets=1, set_size=1,
                      key_min_length=1, key_max_length=256,
                      value_min_length=1, value_max_length=1024):
        """Sets share metadata.

        :param share: the share to set metadata on
        :param sets: how many operations to perform
        :param set_size: number of metadata keys to set in each operation
        :param key_min_length: minimal size of metadata key to set
        :param key_max_length: maximum size of metadata key to set
        :param value_min_length: minimal size of metadata value to set
        :param value_max_length: maximum size of metadata value to set
        :returns: A list of keys that were set
        :raises exceptions.InvalidArgumentsException: if invalid arguments
            were provided.
        """
        if not (key_min_length <= key_max_length
                and value_min_length <= value_max_length):
            raise exceptions.InvalidArgumentsException(
                "Min length for keys and values of metadata can not be bigger "
                "than maximum length.")

        keys = []
        for i in range(sets):
            metadata = {}
            for j in range(set_size):
                # random.choice(range(a, b)) picks from [a, b), so when
                # min == max we must short-circuit to avoid an empty range
                if key_min_length == key_max_length:
                    key_length = key_min_length
                else:
                    key_length = random.choice(
                        range(key_min_length, key_max_length))
                if value_min_length == value_max_length:
                    value_length = value_min_length
                else:
                    value_length = random.choice(
                        range(value_min_length, value_max_length))
                key = self._generate_random_part(length=key_length)
                keys.append(key)
                metadata[key] = self._generate_random_part(length=value_length)
            # one API call per "set" operation
            self.clients("manila").shares.set_metadata(share["id"], metadata)

        return keys

    @atomic.action_timer("manila.delete_metadata")
    def _delete_metadata(self, share, keys, delete_size=3):
        """Deletes share metadata.

        :param share: The share to delete metadata from.
        :param delete_size: number of metadata keys to delete using one single
            call.
        :param keys: a list or tuple of keys to choose deletion candidates
            from
        :raises exceptions.InvalidArgumentsException: if invalid arguments
            were provided.
        """
        if not (isinstance(keys, list) and keys):
            raise exceptions.InvalidArgumentsException(
                "Param 'keys' should be non-empty 'list'. keys = '%s'" % keys)
        # delete in batches of delete_size keys per API call
        for i in range(0, len(keys), delete_size):
            self.clients("manila").shares.delete_metadata(
                share["id"], keys[i:i + delete_size])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,765
openstack/rally-openstack
refs/heads/master
/rally_openstack/_compat.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import importlib
import importlib.abc
import importlib.machinery
import importlib.util
import sys
import warnings


class _MoveSpec(object):
    """Describes one deprecated-module -> new-module relocation."""

    def __init__(self, deprecated, new, release):
        """init moved module info

        :param deprecated: a module name that is deprecated
        :param new: a module name that should be used instead
        :param release: A release when the module was deprecated
        """
        self.deprecated = deprecated
        self.new = new
        # "a/b/c" forms, used to rewrite filesystem paths in __file__/__path__
        self.deprecated_path = self.deprecated.replace(".", "/")
        self.new_path = self.new.replace(".", "/")
        self.release = release

    def get_new_name(self, fullname):
        """Get the new name for deprecated module."""
        return fullname.replace(self.deprecated, self.new)

    def get_deprecated_path(self, path):
        """Get a path to the deprecated module."""
        return path.replace(self.new_path, self.deprecated_path)


# Every module relocation performed by the 2.0.0 package restructuring.
# find_spec() matches by substring, so submodules of these packages are
# redirected as well.
_MOVES = [
    _MoveSpec(
        deprecated="rally_openstack.embedcharts",
        new="rally_openstack.task.ui.charts",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.cleanup",
        new="rally_openstack.task.cleanup",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.contexts",
        new="rally_openstack.task.contexts",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.hook",
        new="rally_openstack.task.hooks",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.scenario",
        new="rally_openstack.task.scenario",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.scenarios",
        new="rally_openstack.task.scenarios",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.types",
        new="rally_openstack.task.types",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.platforms",
        new="rally_openstack.environment.platforms",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.service",
        new="rally_openstack.common.service",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.services",
        new="rally_openstack.common.services",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.validators",
        new="rally_openstack.common.validators",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.wrappers",
        new="rally_openstack.common.wrappers",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.credential",
        new="rally_openstack.common.credential",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.osclients",
        new="rally_openstack.common.osclients",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.consts",
        new="rally_openstack.common.consts",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.exceptions",
        new="rally_openstack.common.exceptions",
        release="2.0.0"
    ),
    _MoveSpec(
        deprecated="rally_openstack.cfg",
        new="rally_openstack.common.cfg",
        release="2.0.0"
    ),
]


class ModuleLoader(object):
    """Loader that materializes a deprecated module from its new location."""

    def __init__(self, move_spec):
        self.move_spec = move_spec

    def create_module(self, spec):
        # Python interpreter will use the default module creator in case of
        # None return value.
        return None

    def exec_module(self, module):
        """Module executor."""
        full_name = self.move_spec.get_new_name(module.__name__)
        # import the real module and mirror its public names onto the
        # deprecated one
        original_module = importlib.import_module(full_name)

        if original_module.__file__.endswith("__init__.py"):
            # NOTE(andreykurilin): In case we need to list submodules the
            #   next code can be used:
            #
            #       import pkgutil
            #
            #       for m in pkgutil.iter_modules(original_module.__path__):
            #           module.__dict__[m.name] = importlib.import_module(
            #               f"{full_name}.{m.name}")
            module.__path__ = [
                self.move_spec.get_deprecated_path(original_module.__path__[0])
            ]

        for item in dir(original_module):
            if item.startswith("_"):
                # private names are not re-exported
                continue
            module.__dict__[item] = original_module.__dict__[item]

        module.__file__ = self.move_spec.get_deprecated_path(
            original_module.__file__)

        return module


class ModulesMovementsHandler(importlib.abc.MetaPathFinder):
    """Meta-path finder that redirects imports of relocated modules."""

    @classmethod
    def _process_spec(cls, fullname, spec):
        """Make module spec and print warning message if needed."""
        # warn only on an exact match, not for submodule imports
        if spec.deprecated == fullname:
            warnings.warn(
                f"Module {fullname} is deprecated since rally-openstack "
                f"{spec.release}. Use {spec.get_new_name(fullname)} instead.",
                stacklevel=3
            )
        return importlib.machinery.ModuleSpec(fullname, ModuleLoader(spec))

    @classmethod
    def find_spec(cls, fullname, path=None, target=None):
        """This functions is what gets executed by the loader."""
        for spec in _MOVES:
            if spec.deprecated in fullname:
                return cls._process_spec(fullname, spec)


def init():
    """Adds our custom module loader."""
    sys.meta_path.append(ModulesMovementsHandler())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,766
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/gnocchi/archive_policy_rule.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils

"""Scenarios for Gnocchi archive policy rule."""


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicyRule.list_archive_policy_rule")
class ListArchivePolicyRule(gnocchiutils.GnocchiBase):

    def run(self):
        """List archive policy rules."""
        service = self.gnocchi
        service.list_archive_policy_rule()


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
    name="GnocchiArchivePolicyRule.create_archive_policy_rule")
class CreateArchivePolicyRule(gnocchiutils.GnocchiBase):

    def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
        """Create archive policy rule.

        :param metric_pattern: Pattern for matching metrics
        :param archive_policy_name: Archive policy name
        """
        rule_name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy_rule(
            rule_name,
            metric_pattern=metric_pattern,
            archive_policy_name=archive_policy_name)


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
    name="GnocchiArchivePolicyRule.create_delete_archive_policy_rule")
class CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase):

    def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
        """Create archive policy rule and then delete it.

        :param metric_pattern: Pattern for matching metrics
        :param archive_policy_name: Archive policy name
        """
        rule_name = self.generate_random_name()
        admin_service = self.admin_gnocchi
        admin_service.create_archive_policy_rule(
            rule_name,
            metric_pattern=metric_pattern,
            archive_policy_name=archive_policy_name)
        admin_service.delete_archive_policy_rule(rule_name)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,767
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/identity/keystone_common.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.task import atomic from rally_openstack.common import osclients from rally_openstack.common.services.identity import identity class UnifiedKeystoneMixin(object): @staticmethod def _unify_service(service): return identity.Service(id=service.id, name=service.name) @staticmethod def _unify_role(role): return identity.Role(id=role.id, name=role.name) def delete_user(self, user_id): """Deletes user by its id.""" return self._impl.delete_user(user_id) def get_user(self, user_id): """Get user.""" return self._unify_user(self._impl.get_user(user_id)) def create_service(self, name=None, service_type=None, description=None): """Creates keystone service.""" return self._unify_service(self._impl.create_service( name=name, service_type=service_type, description=description)) def delete_service(self, service_id): """Deletes service.""" return self._impl.delete_service(service_id) def get_service(self, service_id): """Get service.""" return self._unify_service(self._impl.get_service(service_id)) def get_service_by_name(self, name): """List all services to find proper one.""" return self._unify_service(self._impl.get_service_by_name(name)) def get_role(self, role_id): """Get role.""" return self._unify_role(self._impl.get_role(role_id)) def delete_role(self, role_id): """Deletes role.""" return self._impl.delete_role(role_id) def list_ec2credentials(self, user_id): """List of access/secret pairs for a user_id. 
:param user_id: List all ec2-credentials for User ID :returns: Return ec2-credentials list """ return self._impl.list_ec2credentials(user_id) def delete_ec2credential(self, user_id, access): """Delete ec2credential. :param user_id: User ID for which to delete credential :param access: access key for ec2credential to delete """ return self._impl.delete_ec2credential(user_id=user_id, access=access) def fetch_token(self): """Authenticate user token.""" return self._impl.fetch_token() def validate_token(self, token): """Validate user token. :param token: Auth token to validate """ return self._impl.validate_token(token) class KeystoneMixin(object): def list_users(self): aname = "keystone_v%s.list_users" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).users.list() def delete_user(self, user_id): """Deletes user by its id.""" aname = "keystone_v%s.delete_user" % self.version with atomic.ActionTimer(self, aname): self._clients.keystone(self.version).users.delete(user_id) def get_user(self, user_id): """Get user by its id.""" aname = "keystone_v%s.get_user" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).users.get(user_id) def delete_service(self, service_id): """Deletes service.""" aname = "keystone_v%s.delete_service" % self.version with atomic.ActionTimer(self, aname): self._clients.keystone(self.version).services.delete(service_id) def list_services(self): """List all services.""" aname = "keystone_v%s.list_services" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).services.list() def get_service(self, service_id): """Get service.""" aname = "keystone_v%s.get_services" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).services.get( service_id) def get_service_by_name(self, name): """List all services to find proper one.""" for s in self.list_services(): if s.name == name: return 
s def delete_role(self, role_id): """Deletes role.""" aname = "keystone_v%s.delete_role" % self.version with atomic.ActionTimer(self, aname): self._clients.keystone(self.version).roles.delete(role_id) def list_roles(self): """List all roles.""" aname = "keystone_v%s.list_roles" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).roles.list() def get_role(self, role_id): """Get role.""" aname = "keystone_v%s.get_role" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).roles.get(role_id) def list_ec2credentials(self, user_id): """List of access/secret pairs for a user_id. :param user_id: List all ec2-credentials for User ID :returns: Return ec2-credentials list """ aname = "keystone_v%s.list_ec2creds" % self.version with atomic.ActionTimer(self, aname): return self._clients.keystone(self.version).ec2.list(user_id) def delete_ec2credential(self, user_id, access): """Delete ec2credential. :param user_id: User ID for which to delete credential :param access: access key for ec2credential to delete """ aname = "keystone_v%s.delete_ec2creds" % self.version with atomic.ActionTimer(self, aname): self._clients.keystone(self.version).ec2.delete(user_id=user_id, access=access) def fetch_token(self): """Authenticate user token.""" aname = "keystone_v%s.fetch_token" % self.version with atomic.ActionTimer(self, aname): # use another instance of osclients.Clients to avoid usage of # cached keystone session clients = osclients.Clients(credential=self._clients.credential) return clients.keystone.auth_ref.auth_token def validate_token(self, token): """Validate user token. :param token: Auth token to validate """ aname = "keystone_v%s.validate_token" % self.version with atomic.ActionTimer(self, aname): self._clients.keystone(self.version).tokens.validate(token)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,768
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/storage/block.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.common import cfg from rally.common import logging from rally.task import service CONF = cfg.CONF LOG = logging.getLogger(__name__) Volume = service.make_resource_cls( "Volume", properties=["id", "name", "size", "status"]) VolumeSnapshot = service.make_resource_cls( "VolumeSnapshot", properties=["id", "name", "volume_id", "status"]) VolumeBackup = service.make_resource_cls( "VolumeBackup", properties=["id", "name", "volume_id", "status"]) VolumeTransfer = service.make_resource_cls( "VolumeTransfer", properties=["id", "name", "volume_id", "auth_key"]) VolumeEncryptionType = service.make_resource_cls( "VolumeEncryptionType", properties=["id", "volume_type_id"]) QoSSpecs = service.make_resource_cls( "QoSSpecs", properties=["id", "name", "specs"]) class BlockStorage(service.UnifiedService): @service.should_be_overridden def create_volume(self, size, consistencygroup_id=None, group_id=None, snapshot_id=None, source_volid=None, name=None, description=None, volume_type=None, user_id=None, project_id=None, availability_zone=None, metadata=None, imageRef=None, scheduler_hints=None, source_replica=None, backup_id=None): """Creates a volume. 
:param size: Size of volume in GB :param consistencygroup_id: ID of the consistencygroup :param group_id: ID of the group :param snapshot_id: ID of the snapshot :param name: Name of the volume :param description: Description of the volume :param volume_type: Type of volume :param user_id: User id derived from context :param project_id: Project id derived from context :param availability_zone: Availability Zone to use :param metadata: Optional metadata to set on volume creation :param imageRef: reference to an image stored in glance :param source_volid: ID of source volume to clone from :param source_replica: ID of source volume to clone replica(IGNORED) :param scheduler_hints: (optional extension) arbitrary key-value pairs specified by the client to help boot an instance :param backup_id: ID of the backup :returns: Return a new volume. """ if source_replica: LOG.warning("The argument `source_replica` would be ignored" " because it was removed from cinder api.") return self._impl.create_volume( size, consistencygroup_id=consistencygroup_id, group_id=group_id, snapshot_id=snapshot_id, source_volid=source_volid, name=name, description=description, volume_type=volume_type, user_id=user_id, project_id=project_id, availability_zone=availability_zone, metadata=metadata, imageRef=imageRef, scheduler_hints=scheduler_hints, backup_id=backup_id) @service.should_be_overridden def list_volumes(self, detailed=True, search_opts=None, marker=None, limit=None, sort=None): """Lists all volumes. :param detailed: Whether to return detailed volume info. :param search_opts: Search options to filter out volumes. :param marker: Begin returning volumes that appear later in the volume list than that represented by this volume id. :param limit: Maximum number of volumes to return. :param sort: Sort information :returns: Return volumes list. 
""" return self._impl.list_volumes( detailed=detailed, search_opts=search_opts, marker=marker, limit=limit, sort=sort) @service.should_be_overridden def get_volume(self, volume_id): """Get a volume. :param volume_id: The ID of the volume to get. :returns: Return the volume. """ return self._impl.get_volume(volume_id) @service.should_be_overridden def update_volume(self, volume_id, name=None, description=None): """Update the name or description for a volume. :param volume_id: The updated volume id. :param name: The volume name. :param description: The volume description. :returns: The updated volume. """ return self._impl.update_volume( volume_id, name=name, description=description) @service.should_be_overridden def delete_volume(self, volume): """Delete a volume.""" self._impl.delete_volume(volume) @service.should_be_overridden def extend_volume(self, volume, new_size): """Extend the size of the specified volume.""" return self._impl.extend_volume(volume, new_size=new_size) @service.should_be_overridden def list_snapshots(self, detailed=True): """Get a list of all snapshots.""" return self._impl.list_snapshots(detailed=detailed) @service.should_be_overridden def list_types(self, search_opts=None, is_public=None): """Lists all volume types.""" return self._impl.list_types(search_opts=search_opts, is_public=is_public) @service.should_be_overridden def set_metadata(self, volume, sets=10, set_size=3): """Update/Set a volume metadata. :param volume: The updated/setted volume. :param sets: how many operations to perform :param set_size: number of metadata keys to set in each operation :returns: A list of keys that were set """ return self._impl.set_metadata(volume, sets=sets, set_size=set_size) @service.should_be_overridden def delete_metadata(self, volume, keys, deletes=10, delete_size=3): """Delete volume metadata keys. Note that ``len(keys)`` must be greater than or equal to ``deletes * delete_size``. 
:param volume: The volume to delete metadata from :param deletes: how many operations to perform :param delete_size: number of metadata keys to delete in each operation :param keys: a list of keys to choose deletion candidates from """ self._impl.delete_metadata(volume, keys, deletes=deletes, delete_size=delete_size) @service.should_be_overridden def update_readonly_flag(self, volume, read_only): """Update the read-only access mode flag of the specified volume. :param volume: The UUID of the volume to update. :param read_only: The value to indicate whether to update volume to read-only access mode. :returns: A tuple of http Response and body """ return self._impl.update_readonly_flag(volume, read_only=read_only) @service.should_be_overridden def upload_volume_to_image(self, volume, force=False, container_format="bare", disk_format="raw"): """Upload the given volume to image. Returns created image. :param volume: volume object :param force: flag to indicate whether to snapshot a volume even if it's attached to an instance :param container_format: container format of image. Acceptable formats: ami, ari, aki, bare, and ovf :param disk_format: disk format of image. Acceptable formats: ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso :returns: Returns created image object """ return self._impl.upload_volume_to_image( volume, force=force, container_format=container_format, disk_format=disk_format) @service.should_be_overridden def create_qos(self, specs): """Create a qos specs. :param specs: A dict of key/value pairs to be set :rtype: :class:'QoSSpecs' """ return self._impl.create_qos(specs) @service.should_be_overridden def list_qos(self, search_opts=None): """Get a list of all qos specs. :param search_opts: search options :rtype: list of :class: 'QoSpecs' """ return self._impl.list_qos(search_opts) @service.should_be_overridden def get_qos(self, qos_id): """Get a specific qos specs. :param qos_id: The ID of the :class:`QoSSpecs` to get. 
:rtype: :class:`QoSSpecs` """ return self._impl.get_qos(qos_id) @service.should_be_overridden def set_qos(self, qos, set_specs_args): """Add/Update keys in qos specs. :param qos: The instance of the :class:`QoSSpecs` to set :param set_specs_args: A dict of key/value pairs to be set :rtype: :class:`QoSSpecs` """ return self._impl.set_qos(qos=qos, set_specs_args=set_specs_args) @service.should_be_overridden def qos_associate_type(self, qos_specs, volume_type): """Associate qos specs from volume type. :param qos_specs: The qos specs to be associated with :param volume_type: The volume type id to be associated with :rtype: :class:`QoSSpecs` """ return self._impl.qos_associate_type(qos_specs, volume_type) @service.should_be_overridden def qos_disassociate_type(self, qos_specs, volume_type): """Disassociate qos specs from volume type. :param qos_specs: The qos specs to be associated with :param volume_type: The volume type id to be disassociated with :rtype: :class:`QoSSpecs` """ return self._impl.qos_disassociate_type(qos_specs, volume_type) @service.should_be_overridden def create_snapshot(self, volume_id, force=False, name=None, description=None, metadata=None): """Create one snapshot. Returns when the snapshot is actually created and is in the "Available" state. :param volume_id: volume uuid for creating snapshot :param force: If force is True, create a snapshot even if the volume is attached to an instance. Default is False. :param name: Name of the snapshot :param description: Description of the snapshot :param metadata: Metadata of the snapshot :returns: Created snapshot object """ return self._impl.create_snapshot( volume_id, force=force, name=name, description=description, metadata=metadata) @service.should_be_overridden def delete_snapshot(self, snapshot): """Delete the given snapshot. Returns when the snapshot is actually deleted. 
:param snapshot: snapshot instance """ self._impl.delete_snapshot(snapshot) @service.should_be_overridden def create_backup(self, volume_id, container=None, name=None, description=None, incremental=False, force=False, snapshot_id=None): """Creates a volume backup. :param volume_id: The ID of the volume to backup. :param container: The name of the backup service container. :param name: The name of the backup. :param description: The description of the backup. :param incremental: Incremental backup. :param force: If True, allows an in-use volume to be backed up. :param snapshot_id: The ID of the snapshot to backup. :returns: The created backup object. """ return self._impl.create_backup(volume_id, container=container, name=name, description=description, incremental=incremental, force=force, snapshot_id=snapshot_id) @service.should_be_overridden def delete_backup(self, backup): """Delete a volume backup.""" self._impl.delete_backup(backup) @service.should_be_overridden def restore_backup(self, backup_id, volume_id=None): """Restore the given backup. :param backup_id: The ID of the backup to restore. :param volume_id: The ID of the volume to restore the backup to. :returns: Return the restored backup. """ return self._impl.restore_backup(backup_id, volume_id=volume_id) @service.should_be_overridden def list_backups(self, detailed=True): """Return user volume backups list.""" return self._impl.list_backups(detailed=detailed) @service.should_be_overridden def list_transfers(self, detailed=True, search_opts=None): """Get a list of all volume transfers. :param detailed: If True, detailed information about transfer should be listed :param search_opts: Search options to filter out volume transfers :returns: list of :class:`VolumeTransfer` """ return self._impl.list_transfers(detailed=detailed, search_opts=search_opts) @service.should_be_overridden def create_volume_type(self, name=None, description=None, is_public=True): """Creates a volume type. 
:param name: Descriptive name of the volume type :param description: Description of the volume type :param is_public: Volume type visibility :returns: Return the created volume type. """ return self._impl.create_volume_type(name=name, description=description, is_public=is_public) @service.should_be_overridden def update_volume_type(self, volume_type, name=None, description=None, is_public=None): """Update the name and/or description for a volume type. :param volume_type: The ID or an instance of the :class:`VolumeType` to update. :param name: if None, updates name by generating random name. else updates name with provided name :param description: Description of the volume type. :returns: Returns an updated volume type object. """ return self._impl.update_volume_type( volume_type=volume_type, name=name, description=description, is_public=is_public ) @service.should_be_overridden def add_type_access(self, volume_type, project): """Add a project to the given volume type access list. :param volume_type: Volume type name or ID to add access for the given project :param project: Project ID to add volume type access for :return: An instance of cinderclient.apiclient.base.TupleWithMeta """ return self._impl.add_type_access( volume_type=volume_type, project=project ) @service.should_be_overridden def list_type_access(self, volume_type): """Print access information about the given volume type :param volume_type: Filter results by volume type name or ID :return: VolumeTypeAccess of specific project """ return self._impl.list_type_access(volume_type) @service.should_be_overridden def get_volume_type(self, volume_type): """get details of volume_type. :param volume_type: The ID of the :class:`VolumeType` to get :returns: :class:`VolumeType` """ return self._impl.get_volume_type(volume_type) @service.should_be_overridden def delete_volume_type(self, volume_type): """delete a volume type. 
:param volume_type: Name or Id of the volume type :returns: base on client response return True if the request has been accepted or not """ return self._impl.delete_volume_type(volume_type) @service.should_be_overridden def set_volume_type_keys(self, volume_type, metadata): """Set extra specs on a volume type. :param volume_type: The :class:`VolumeType` to set extra spec on :param metadata: A dict of key/value pairs to be set :returns: extra_specs if the request has been accepted """ return self._impl.set_volume_type_keys(volume_type, metadata) @service.should_be_overridden def transfer_create(self, volume_id, name=None): """Creates a volume transfer. :param name: The name of created transfer :param volume_id: The ID of the volume to transfer. :returns: Return the created transfer. """ return self._impl.transfer_create(volume_id, name=name) @service.should_be_overridden def transfer_accept(self, transfer_id, auth_key): """Accept a volume transfer. :param transfer_id: The ID of the transfer to accept. :param auth_key: The auth_key of the transfer. :returns: VolumeTransfer """ return self._impl.transfer_accept(transfer_id, auth_key=auth_key) @service.should_be_overridden def create_encryption_type(self, volume_type, specs): """Create encryption type for a volume type. Default: admin only. :param volume_type: the volume type on which to add an encryption type :param specs: the encryption type specifications to add :return: an instance of :class: VolumeEncryptionType """ return self._impl.create_encryption_type(volume_type, specs=specs) @service.should_be_overridden def get_encryption_type(self, volume_type): """Get the volume encryption type for the specified volume type. :param volume_type: the volume type to query :return: an instance of :class: VolumeEncryptionType """ return self._impl.get_encryption_type(volume_type) @service.should_be_overridden def list_encryption_type(self, search_opts=None): """List all volume encryption types. 
:param search_opts: Options used when search for encryption types :return: a list of :class: VolumeEncryptionType instances """ return self._impl.list_encryption_type(search_opts=search_opts) @service.should_be_overridden def delete_encryption_type(self, volume_type): """Delete the encryption type information for the specified volume type :param volume_type: the volume type whose encryption type information must be deleted """ self._impl.delete_encryption_type(volume_type) @service.should_be_overridden def update_encryption_type(self, volume_type, specs): """Update the encryption type information for the specified volume type :param volume_type: the volume type whose encryption type information will be updated :param specs: the encryption type specifications to update :return: an instance of :class: VolumeEncryptionType """ return self._impl.update_encryption_type(volume_type, specs=specs)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,769
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/manila/test_utils.py
# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally import exceptions from rally_openstack.task.contexts.manila import consts from rally_openstack.task.scenarios.manila import utils from tests.unit import test BM_UTILS = "rally.task.utils." @ddt.ddt class ManilaScenarioTestCase(test.ScenarioTestCase): def setUp(self): super(ManilaScenarioTestCase, self).setUp() self.scenario = utils.ManilaScenario(self.context) def test__create_share(self): fake_share = mock.Mock() self.clients("manila").shares.create.return_value = fake_share self.scenario.context = { "tenant": { consts.SHARE_NETWORKS_CONTEXT_NAME: { "share_networks": [{"id": "sn_1_id"}, {"id": "sn_2_id"}], } }, "iteration": 0, } fake_random_name = "fake_random_name_value" self.scenario.generate_random_name = mock.Mock( return_value=fake_random_name) self.scenario._create_share("nfs") self.clients("manila").shares.create.assert_called_once_with( "nfs", 1, name=fake_random_name, share_network=self.scenario.context["tenant"][ consts.SHARE_NETWORKS_CONTEXT_NAME]["share_networks"][0]["id"]) self.mock_wait_for_status.mock.assert_called_once_with( fake_share, ready_statuses=["available"], update_resource=self.mock_get_from_manager.mock.return_value, timeout=300, check_interval=3) self.mock_get_from_manager.mock.assert_called_once_with() @mock.patch(BM_UTILS + "wait_for_status") def test__delete_share(self, mock_wait_for_status): fake_share 
= mock.MagicMock() self.scenario._delete_share(fake_share) fake_share.delete.assert_called_once_with() mock_wait_for_status.assert_called_once_with( fake_share, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, timeout=180, check_interval=2) self.mock_get_from_manager.mock.assert_called_once_with( ("error_deleting", )) def test_export_location(self): fake_share = mock.MagicMock() fake_share.export_locations = "fake_location" result = self.scenario._export_location(fake_share) self.assertEqual(result, "fake_location") @ddt.data( {}, {"detailed": False, "search_opts": None}, {"detailed": True, "search_opts": {"name": "foo_sn"}}, {"search_opts": {"project_id": "fake_project"}}, ) def test__list_shares(self, params): fake_shares = ["foo", "bar"] self.clients("manila").shares.list.return_value = fake_shares result = self.scenario._list_shares(**params) self.assertEqual(fake_shares, result) self.clients("manila").shares.list.assert_called_once_with( detailed=params.get("detailed", True), search_opts=params.get("search_opts")) @ddt.data( {"new_size": 5}, {"new_size": 10} ) def test__extend_share(self, new_size): fake_share = mock.MagicMock() self.scenario._extend_share(fake_share, new_size) self.clients("manila").shares.extend.assert_called_once_with( fake_share, new_size) self.mock_wait_for_status.mock.assert_called_once_with( fake_share, ready_statuses=["available"], update_resource=self.mock_get_from_manager.mock.return_value, timeout=300, check_interval=3) self.mock_get_from_manager.mock.assert_called_once_with() @ddt.data( {"new_size": 5}, {"new_size": 10} ) def test__shrink_share(self, new_size): fake_share = mock.MagicMock() self.scenario._shrink_share(fake_share, new_size) fake_share.shrink.assert_called_with(new_size) self.mock_wait_for_status.mock.assert_called_once_with( fake_share, ready_statuses=["available"], update_resource=self.mock_get_from_manager.mock.return_value, timeout=300, 
check_interval=3) self.mock_get_from_manager.mock.assert_called_once_with() @ddt.data( { "access_type": "ip", "access": "1.2.3.4", "access_level": "rw", "access_id": "foo" }, { "access_type": "domain", "access": "4.3.2.1", "access_level": "ro", "access_id": "bar" } ) @ddt.unpack def test__allow_access_share(self, access_type, access, access_level, access_id): fake_allow_result = {"id": access_id} fake_access = mock.MagicMock() fake_access.id = access_id fake_update = mock.MagicMock() self.scenario._update_resource_in_allow_access_share = mock.MagicMock( return_value=fake_update) fake_share = mock.MagicMock() fake_share.allow.return_value = fake_allow_result fake_share.access_list.return_value = [fake_access] self.assertEqual(self.scenario._allow_access_share( fake_share, access_type, access, access_level), fake_allow_result) self.scenario._update_resource_in_allow_access_share \ .assert_called_with(fake_share, access_id) self.mock_wait_for_status.mock.assert_called_once_with( fake_access, ready_statuses=["active"], update_resource=fake_update, check_interval=3.0, timeout=300.0) def test__get_access_from_share_with_no_access_in_share(self): access_id = "foo" fake_share = mock.MagicMock() fake_access = mock.MagicMock() fake_access.id = access_id fake_share.access_list.return_value = [] self.assertRaises(exceptions.GetResourceNotFound, self.scenario._get_access_from_share, fake_share, access_id) def test__get_access_from_share(self): access_id = "foo" fake_share = mock.MagicMock() fake_access = mock.MagicMock() fake_access.id = access_id fake_share.access_list.return_value = [fake_access] access = self.scenario._get_access_from_share(fake_share, access_id) self.assertEqual(access, fake_access) def test__update_resource_in_allow_access_share(self): access_id = "foo" fake_share = mock.MagicMock() fake_resource = mock.MagicMock() fake_access = mock.MagicMock() fake_access.id = access_id fake_share.access_list.return_value = [fake_access] fn = 
self.scenario._update_resource_in_allow_access_share( fake_share, access_id) self.assertEqual(fn(fake_resource), fake_access) def test__deny_access_share(self): access_id = "foo" fake_access = mock.MagicMock() fake_access.id = access_id fake_update = mock.MagicMock() self.scenario._update_resource_in_deny_access_share = mock.MagicMock( return_value=fake_update) fake_share = mock.MagicMock() fake_share.access_list.return_value = [fake_access] self.scenario._deny_access_share(fake_share, access_id) self.scenario._update_resource_in_deny_access_share \ .assert_called_with(fake_share, access_id) self.mock_wait_for_status.mock.assert_called_once_with( fake_access, check_deletion=True, ready_statuses=["deleted"], update_resource=fake_update, check_interval=2.0, timeout=180.0) def test__update_resource_in_deny_access_share(self): access_id = "foo" fake_share = mock.MagicMock() fake_resource = mock.MagicMock() fake_access = mock.MagicMock() fake_access.id = access_id fake_share.access_list.return_value = [fake_access] fn = self.scenario._update_resource_in_deny_access_share( fake_share, access_id) assert fn(fake_resource) == fake_access def test__update_resource_in_deny_access_share_with_deleted_resource(self): access_id = "foo" fake_share = mock.MagicMock() fake_resource = mock.MagicMock() fake_access = mock.MagicMock() fake_access.access_id = access_id fake_share.access_list.return_value = [] fn = self.scenario._update_resource_in_deny_access_share( fake_share, access_id) self.assertRaises(exceptions.GetResourceNotFound, fn, fake_resource) def test__create_share_network(self): fake_sn = mock.Mock() self.scenario.generate_random_name = mock.Mock() self.clients("manila").share_networks.create.return_value = fake_sn data = { "neutron_net_id": "fake_neutron_net_id", "neutron_subnet_id": "fake_neutron_subnet_id", "nova_net_id": "fake_nova_net_id", "description": "fake_description", } expected = dict(data) expected["name"] = self.scenario.generate_random_name.return_value 
result = self.scenario._create_share_network(**data) self.assertEqual(fake_sn, result) self.clients("manila").share_networks.create.assert_called_once_with( **expected) @mock.patch(BM_UTILS + "wait_for_status") def test__delete_share_network(self, mock_wait_for_status): fake_sn = mock.MagicMock() self.scenario._delete_share_network(fake_sn) fake_sn.delete.assert_called_once_with() mock_wait_for_status.assert_called_once_with( fake_sn, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, timeout=180, check_interval=2) self.mock_get_from_manager.mock.assert_called_once_with() @ddt.data( {"detailed": True, "search_opts": {"name": "foo_sn"}}, {"detailed": False, "search_opts": None}, {}, {"search_opts": {"project_id": "fake_project"}}, ) def test__list_share_networks(self, params): fake_share_networks = ["foo", "bar"] self.clients("manila").share_networks.list.return_value = ( fake_share_networks) result = self.scenario._list_share_networks(**params) self.assertEqual(fake_share_networks, result) self.clients("manila").share_networks.list.assert_called_once_with( detailed=params.get("detailed", True), search_opts=params.get("search_opts")) @ddt.data( {}, {"search_opts": None}, {"search_opts": {"project_id": "fake_project"}}, ) def test__list_share_servers(self, params): fake_share_servers = ["foo", "bar"] self.admin_clients("manila").share_servers.list.return_value = ( fake_share_servers) result = self.scenario._list_share_servers(**params) self.assertEqual(fake_share_servers, result) self.admin_clients( "manila").share_servers.list.assert_called_once_with( search_opts=params.get("search_opts")) @ddt.data("ldap", "kerberos", "active_directory") def test__create_security_service(self, ss_type): fake_ss = mock.Mock() self.clients("manila").security_services.create.return_value = fake_ss self.scenario.generate_random_name = mock.Mock() data = { "security_service_type": ss_type, "dns_ip": "fake_dns_ip", "server": 
"fake_server", "domain": "fake_domain", "user": "fake_user", "password": "fake_password", "description": "fake_description", } expected = dict(data) expected["type"] = expected.pop("security_service_type") expected["name"] = self.scenario.generate_random_name.return_value result = self.scenario._create_security_service(**data) self.assertEqual(fake_ss, result) self.clients( "manila").security_services.create.assert_called_once_with( **expected) @mock.patch(BM_UTILS + "wait_for_status") def test__delete_security_service(self, mock_wait_for_status): fake_ss = mock.MagicMock() self.scenario._delete_security_service(fake_ss) fake_ss.delete.assert_called_once_with() mock_wait_for_status.assert_called_once_with( fake_ss, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, timeout=180, check_interval=2) self.mock_get_from_manager.mock.assert_called_once_with() def test__add_security_service_to_share_network(self): fake_sn = mock.MagicMock() fake_ss = mock.MagicMock() result = self.scenario._add_security_service_to_share_network( share_network=fake_sn, security_service=fake_ss) self.assertEqual( self.clients( "manila").share_networks.add_security_service.return_value, result) self.clients( "manila").share_networks.add_security_service.assert_has_calls([ mock.call(fake_sn, fake_ss)]) @ddt.data( {"key_min_length": 5, "key_max_length": 4}, {"value_min_length": 5, "value_max_length": 4}, ) def test__set_metadata_wrong_params(self, params): self.assertRaises( exceptions.InvalidArgumentsException, self.scenario._set_metadata, {"id": "fake_share_id"}, **params) @ddt.data( {}, {"sets": 0, "set_size": 1}, {"sets": 1, "set_size": 1}, {"sets": 5, "set_size": 7}, {"sets": 5, "set_size": 2}, {"key_min_length": 1, "key_max_length": 1}, {"key_min_length": 1, "key_max_length": 2}, {"key_min_length": 256, "key_max_length": 256}, {"value_min_length": 1, "value_max_length": 1}, {"value_min_length": 1, "value_max_length": 2}, 
{"value_min_length": 1024, "value_max_length": 1024}, ) def test__set_metadata(self, params): share = {"id": "fake_share_id"} sets = params.get("sets", 1) set_size = params.get("set_size", 1) gen_name_calls = sets * set_size * 2 data = range(gen_name_calls) generator_data = iter(data) def fake_random_name(prefix="fake", length="fake"): return next(generator_data) scenario = self.scenario scenario.clients = mock.MagicMock() scenario._generate_random_part = mock.MagicMock( side_effect=fake_random_name) keys = scenario._set_metadata(share, **params) self.assertEqual( gen_name_calls, scenario._generate_random_part.call_count) self.assertEqual( params.get("sets", 1), scenario.clients.return_value.shares.set_metadata.call_count) scenario.clients.return_value.shares.set_metadata.assert_has_calls([ mock.call( share["id"], dict([(j, j + 1) for j in data[ i * set_size * 2: (i + 1) * set_size * 2: 2]]) ) for i in range(sets) ]) self.assertEqual([i for i in range(0, gen_name_calls, 2)], keys) @ddt.data(None, [], {"fake_set"}, {"fake_key": "fake_value"}) def test__delete_metadata_wrong_params(self, keys): self.assertRaises( exceptions.InvalidArgumentsException, self.scenario._delete_metadata, "fake_share", keys=keys, ) @ddt.data( {"keys": [i for i in range(30)]}, {"keys": list(range(7)), "delete_size": 2}, {"keys": list(range(7)), "delete_size": 3}, {"keys": list(range(7)), "delete_size": 4}, ) def test__delete_metadata(self, params): share = {"id": "fake_share_id"} delete_size = params.get("delete_size", 3) keys = params.get("keys", []) scenario = self.scenario scenario.clients = mock.MagicMock() scenario._delete_metadata(share, **params) scenario.clients.return_value.shares.delete_metadata.assert_has_calls([ mock.call(share["id"], keys[i:i + delete_size]) for i in range(0, len(keys), delete_size) ])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,770
openstack/rally-openstack
refs/heads/master
/tests/unit/common/test_osclients.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally.common import cfg from rally import exceptions from rally_openstack.common import consts from rally_openstack.common import credential as oscredential from rally_openstack.common import osclients from tests.unit import fakes from tests.unit import test PATH = "rally_openstack.common.osclients" @osclients.configure("dummy", supported_versions=("0.1", "1"), default_service_type="bar") class DummyClient(osclients.OSClient): def create_client(self, *args, **kwargs): pass class OSClientTestCaseUtils(object): def set_up_keystone_mocks(self): self.ksc_module = mock.MagicMock(__version__="2.0.0") self.ksc_client = mock.MagicMock() self.ksa_identity_plugin = mock.MagicMock() self.ksa_password = mock.MagicMock( return_value=self.ksa_identity_plugin) self.ksa_identity = mock.MagicMock(Password=self.ksa_password) self.ksa_auth = mock.MagicMock() self.ksa_session = mock.MagicMock() self.patcher = mock.patch.dict("sys.modules", {"keystoneclient": self.ksc_module, "keystoneauth1": self.ksa_auth}) self.patcher.start() self.addCleanup(self.patcher.stop) self.ksc_module.client = self.ksc_client self.ksa_auth.identity = self.ksa_identity self.ksa_auth.session = self.ksa_session def make_auth_args(self): auth_kwargs = { "auth_url": "http://auth_url/", "username": "user", "password": "password", "tenant_name": "tenant", "domain_name": "domain", 
"project_name": "project_name", "project_domain_name": "project_domain_name", "user_domain_name": "user_domain_name", } kwargs = {"https_insecure": False, "https_cacert": None} kwargs.update(auth_kwargs) return auth_kwargs, kwargs @ddt.ddt class OSClientTestCase(test.TestCase, OSClientTestCaseUtils): @ddt.data((0.1, True), (1, True), ("0.1", True), ("1", True), (0.2, False), ("foo", False)) @ddt.unpack def test_validate_version(self, version, valid): if valid: DummyClient.validate_version(version) else: self.assertRaises(exceptions.ValidationError, DummyClient.validate_version, version) def test_choose_service_type(self): default_service_type = "default_service_type" @osclients.configure(self.id(), default_service_type=default_service_type) class FakeClient(osclients.OSClient): create_client = mock.MagicMock() fake_client = FakeClient({"auth_url": "url", "username": "user", "password": "pass"}, {}) self.assertEqual(default_service_type, fake_client.choose_service_type()) self.assertEqual("foo", fake_client.choose_service_type("foo")) @mock.patch("%s.Keystone.service_catalog" % PATH) @ddt.data( {"endpoint_type": None, "service_type": None, "region_name": None}, {"endpoint_type": "et", "service_type": "st", "region_name": "rn"} ) @ddt.unpack def test__get_endpoint(self, mock_keystone_service_catalog, endpoint_type, service_type, region_name): credential = oscredential.OpenStackCredential( "http://auth_url/v2.0", "user", "pass", endpoint_type=endpoint_type, region_name=region_name) mock_choose_service_type = mock.MagicMock() osclient = osclients.OSClient(credential, mock.MagicMock()) osclient.choose_service_type = mock_choose_service_type mock_url_for = mock_keystone_service_catalog.url_for self.assertEqual(mock_url_for.return_value, osclient._get_endpoint(service_type)) call_args = { "service_type": mock_choose_service_type.return_value, "region_name": region_name} if endpoint_type: call_args["interface"] = endpoint_type 
mock_url_for.assert_called_once_with(**call_args) mock_choose_service_type.assert_called_once_with(service_type) class CachedTestCase(test.TestCase): def test_cached(self): clients = osclients.Clients({"auth_url": "url", "username": "user", "password": "pass"}) @osclients.configure(self.id()) class SomeClient(osclients.OSClient): pass fake_client = SomeClient(clients.credential, clients.cache) fake_client.create_client = mock.MagicMock() self.assertEqual({}, clients.cache) fake_client() self.assertEqual( {self.id(): fake_client.create_client.return_value}, clients.cache) fake_client.create_client.assert_called_once_with() fake_client() fake_client.create_client.assert_called_once_with() fake_client("2") self.assertEqual( {self.id(): fake_client.create_client.return_value, "%s('2',)" % self.id(): fake_client.create_client.return_value}, clients.cache) clients.clear() self.assertEqual({}, clients.cache) @ddt.ddt class TestCreateKeystoneClient(test.TestCase, OSClientTestCaseUtils): def setUp(self): super(TestCreateKeystoneClient, self).setUp() self.credential = oscredential.OpenStackCredential( "http://auth_url/v2.0", "user", "pass", "tenant") def test_create_client(self): # NOTE(bigjools): This is a very poor testing strategy as it # tightly couples the test implementation to the tested # function's implementation. Ideally, we'd use a fake keystone # but all that's happening here is that it's checking the right # parameters were passed to the various parts that create a # client. Hopefully one day we'll get a real fake from the # keystone guys. 
self.set_up_keystone_mocks() keystone = osclients.Keystone(self.credential, mock.MagicMock()) keystone.get_session = mock.Mock( return_value=(self.ksa_session, self.ksa_identity_plugin,)) client = keystone.create_client(version=3) kwargs_session = self.credential.to_dict() kwargs_session.update({ "auth_url": "http://auth_url/", "session": self.ksa_session, "timeout": 180.0}) keystone.get_session.assert_called_with() called_with = self.ksc_client.Client.call_args_list[0][1] self.assertEqual( {"session": self.ksa_session, "timeout": 180.0, "version": "3"}, called_with) self.ksc_client.Client.assert_called_once_with( session=self.ksa_session, timeout=180.0, version="3") self.assertIs(client, self.ksc_client.Client()) def test_create_client_removes_url_path_if_version_specified(self): # If specifying a version on the client creation call, ensure # the auth_url is versionless and the version required is passed # into the Client() call. self.set_up_keystone_mocks() auth_kwargs, all_kwargs = self.make_auth_args() keystone = osclients.Keystone( self.credential, mock.MagicMock()) keystone.get_session = mock.Mock( return_value=(self.ksa_session, self.ksa_identity_plugin,)) client = keystone.create_client(version="3") self.assertIs(client, self.ksc_client.Client()) called_with = self.ksc_client.Client.call_args_list[0][1] self.assertEqual( {"session": self.ksa_session, "timeout": 180.0, "version": "3"}, called_with) @ddt.data({"original": "https://example.com/identity/foo/v3", "cropped": "https://example.com/identity/foo"}, {"original": "https://example.com/identity/foo/v3/", "cropped": "https://example.com/identity/foo"}, {"original": "https://example.com/identity/foo/v2.0", "cropped": "https://example.com/identity/foo"}, {"original": "https://example.com/identity/foo/v2.0/", "cropped": "https://example.com/identity/foo"}, {"original": "https://example.com/identity/foo", "cropped": "https://example.com/identity/foo"}) @ddt.unpack def test__remove_url_version(self, original, 
cropped): credential = oscredential.OpenStackCredential( original, "user", "pass", "tenant") keystone = osclients.Keystone(credential, {}) self.assertEqual(cropped, keystone._remove_url_version()) @ddt.data("http://auth_url/v2.0", "http://auth_url/v3", "http://auth_url/", "auth_url") def test_keystone_get_session(self, auth_url): credential = oscredential.OpenStackCredential( auth_url, "user", "pass", "tenant") self.set_up_keystone_mocks() keystone = osclients.Keystone(credential, {}) version_data = mock.Mock(return_value=[{"version": (1, 0)}]) self.ksa_auth.discover.Discover.return_value = ( mock.Mock(version_data=version_data)) self.assertEqual((self.ksa_session.Session.return_value, self.ksa_identity_plugin), keystone.get_session()) if auth_url.endswith("v2.0"): self.ksa_password.assert_called_once_with( auth_url=auth_url, password="pass", tenant_name="tenant", username="user") else: self.ksa_password.assert_called_once_with( auth_url=auth_url, password="pass", tenant_name="tenant", username="user", domain_name=None, project_domain_name=None, user_domain_name=None) self.assertEqual( [mock.call(timeout=180.0, verify=True, cert=None), mock.call(auth=self.ksa_identity_plugin, timeout=180.0, verify=True, cert=None)], self.ksa_session.Session.call_args_list ) def test_keystone_property(self): keystone = osclients.Keystone(self.credential, None) self.assertRaises(exceptions.RallyException, lambda: keystone.keystone) @mock.patch("%s.Keystone.get_session" % PATH) def test_auth_ref(self, mock_keystone_get_session): session = mock.MagicMock() auth_plugin = mock.MagicMock() mock_keystone_get_session.return_value = (session, auth_plugin) cache = {} keystone = osclients.Keystone(self.credential, cache) self.assertEqual(auth_plugin.get_access.return_value, keystone.auth_ref) self.assertEqual(auth_plugin.get_access.return_value, cache["keystone_auth_ref"]) # check that auth_ref was cached. 
keystone.auth_ref mock_keystone_get_session.assert_called_once_with() @mock.patch("%s.LOG.exception" % PATH) @mock.patch("%s.logging.is_debug" % PATH) def test_auth_ref_fails(self, mock_is_debug, mock_log_exception): mock_is_debug.return_value = False keystone = osclients.Keystone(self.credential, {}) session = mock.Mock() auth_plugin = mock.Mock() auth_plugin.get_access.side_effect = Exception keystone.get_session = mock.Mock(return_value=(session, auth_plugin)) self.assertRaises(osclients.AuthenticationFailed, lambda: keystone.auth_ref) self.assertFalse(mock_log_exception.called) mock_is_debug.assert_called_once_with() auth_plugin.get_access.assert_called_once_with(session) @mock.patch("%s.LOG.exception" % PATH) @mock.patch("%s.logging.is_debug" % PATH) def test_auth_ref_fails_debug(self, mock_is_debug, mock_log_exception): mock_is_debug.return_value = True keystone = osclients.Keystone(self.credential, {}) session = mock.Mock() auth_plugin = mock.Mock() auth_plugin.get_access.side_effect = Exception keystone.get_session = mock.Mock(return_value=(session, auth_plugin)) self.assertRaises(osclients.AuthenticationFailed, lambda: keystone.auth_ref) mock_log_exception.assert_called_once_with(mock.ANY) mock_is_debug.assert_called_once_with() auth_plugin.get_access.assert_called_once_with(session) @mock.patch("%s.LOG.exception" % PATH) @mock.patch("%s.logging.is_debug" % PATH) def test_auth_ref_fails_debug_with_native_keystone_error( self, mock_is_debug, mock_log_exception): from keystoneauth1 import exceptions as ks_exc mock_is_debug.return_value = True keystone = osclients.Keystone(self.credential, {}) session = mock.Mock() auth_plugin = mock.Mock() auth_plugin.get_access.side_effect = ks_exc.ConnectFailure("foo") keystone.get_session = mock.Mock(return_value=(session, auth_plugin)) self.assertRaises(osclients.AuthenticationFailed, lambda: keystone.auth_ref) self.assertFalse(mock_log_exception.called) mock_is_debug.assert_called_once_with() 
auth_plugin.get_access.assert_called_once_with(session) def test_authentication_failed_exception(self): from keystoneauth1 import exceptions as ks_exc original_e = KeyError("Oops") e = osclients.AuthenticationFailed( url="https://example.com", username="foo", project="project", error=original_e ) self.assertEqual( "Failed to authenticate to https://example.com for user 'foo' in " "project 'project': [KeyError] 'Oops'", e.format_message()) original_e = ks_exc.Unauthorized("The request you have made requires " "authentication.", request_id="ID") e = osclients.AuthenticationFailed( url="https://example.com", username="foo", project="project", error=original_e ) self.assertEqual( "Failed to authenticate to https://example.com for user 'foo' in " "project 'project': The request you have made requires " "authentication.", e.format_message()) original_e = ks_exc.ConnectionError("Some user-friendly native error") e = osclients.AuthenticationFailed( url="https://example.com", username="foo", project="project", error=original_e ) self.assertEqual("Some user-friendly native error", e.format_message()) original_e = ks_exc.ConnectionError( "Unable to establish connection to https://example.com:500: " "HTTPSConnectionPool(host='example.com', port=500): Max retries " "exceeded with url: / (Caused by NewConnectionError('<urllib3." "connection.VerifiedHTTPSConnection object at 0x7fb87a48e510>: " "Failed to establish a new connection: [Errno 101] Network " "is unreachable") e = osclients.AuthenticationFailed( url="https://example.com", username="foo", project="project", error=original_e ) self.assertEqual( "Unable to establish connection to https://example.com:500", e.format_message()) original_e = ks_exc.ConnectionError( "Unable to establish connection to https://example.com:500: " # another pool class "HTTPConnectionPool(host='example.com', port=500): Max retries " "exceeded with url: / (Caused by NewConnectionError('<urllib3." 
"connection.VerifiedHTTPSConnection object at 0x7fb87a48e510>: " "Failed to establish a new connection: [Errno 101] Network " "is unreachable") e = osclients.AuthenticationFailed( url="https://example.com", username="foo", project="project", error=original_e ) self.assertEqual( "Unable to establish connection to https://example.com:500", e.format_message()) @ddt.ddt class OSClientsTestCase(test.TestCase): def setUp(self): super(OSClientsTestCase, self).setUp() self.credential = oscredential.OpenStackCredential( "http://auth_url/v2.0", "user", "pass", "tenant") self.clients = osclients.Clients(self.credential, {}) self.fake_keystone = fakes.FakeKeystoneClient() keystone_patcher = mock.patch( "%s.Keystone.create_client" % PATH, return_value=self.fake_keystone) self.mock_create_keystone_client = keystone_patcher.start() self.auth_ref_patcher = mock.patch("%s.Keystone.auth_ref" % PATH) self.auth_ref = self.auth_ref_patcher.start() self.service_catalog = self.auth_ref.service_catalog self.service_catalog.url_for = mock.MagicMock() def test_create_from_env(self): with mock.patch.dict("os.environ", {"OS_AUTH_URL": "foo_auth_url", "OS_USERNAME": "foo_username", "OS_PASSWORD": "foo_password", "OS_TENANT_NAME": "foo_tenant_name", "OS_REGION_NAME": "foo_region_name"}): clients = osclients.Clients.create_from_env() self.assertEqual("foo_auth_url", clients.credential.auth_url) self.assertEqual("foo_username", clients.credential.username) self.assertEqual("foo_password", clients.credential.password) self.assertEqual("foo_tenant_name", clients.credential.tenant_name) self.assertEqual("foo_region_name", clients.credential.region_name) def test_keystone(self): self.assertNotIn("keystone", self.clients.cache) client = self.clients.keystone() self.assertEqual(self.fake_keystone, client) credential = {"timeout": cfg.CONF.openstack_client_http_timeout, "insecure": False, "cacert": None} kwargs = self.credential.to_dict() kwargs.update(credential) 
self.mock_create_keystone_client.assert_called_once_with() self.assertEqual(self.fake_keystone, self.clients.cache["keystone"]) def test_keystone_versions(self): self.clients.keystone.validate_version(2) self.clients.keystone.validate_version(3) def test_keysonte_service_type(self): self.assertRaises(exceptions.RallyException, self.clients.keystone.is_service_type_configurable) def test_verified_keystone(self): self.auth_ref.role_names = ["admin"] self.assertEqual(self.mock_create_keystone_client.return_value, self.clients.verified_keystone()) def test_verified_keystone_user_not_admin(self): self.auth_ref.role_names = ["notadmin"] self.assertRaises(exceptions.InvalidAdminException, self.clients.verified_keystone) @mock.patch("%s.Keystone.get_session" % PATH) def test_verified_keystone_authentication_fails(self, mock_keystone_get_session): self.auth_ref_patcher.stop() mock_keystone_get_session.side_effect = ( exceptions.AuthenticationFailed( username=self.credential.username, project=self.credential.tenant_name, url=self.credential.auth_url, etype=KeyError, error="oops") ) self.assertRaises(exceptions.AuthenticationFailed, self.clients.verified_keystone) @mock.patch("%s.Nova._get_endpoint" % PATH) def test_nova(self, mock_nova__get_endpoint): fake_nova = fakes.FakeNovaClient() mock_nova__get_endpoint.return_value = "http://fake.to:2/fake" mock_nova = mock.MagicMock() mock_nova.client.Client.return_value = fake_nova mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("nova", self.clients.cache) with mock.patch.dict("sys.modules", {"novaclient": mock_nova, "keystoneauth1": mock_keystoneauth1}): mock_keystoneauth1.discover.Discover.return_value = ( mock.Mock(version_data=mock.Mock(return_value=[ {"version": (2, 0)}])) ) client = self.clients.nova() self.assertEqual(fake_nova, client) kw = { "version": "2", "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_nova__get_endpoint.return_value} mock_nova.client.Client.assert_called_once_with(**kw) 
self.assertEqual(fake_nova, self.clients.cache["nova"]) def test_nova_validate_version(self): osclients.Nova.validate_version("2") self.assertRaises(exceptions.RallyException, osclients.Nova.validate_version, "foo") def test_nova_service_type(self): self.clients.nova.is_service_type_configurable() @mock.patch("%s.Neutron._get_endpoint" % PATH) def test_neutron(self, mock_neutron__get_endpoint): fake_neutron = fakes.FakeNeutronClient() mock_neutron__get_endpoint.return_value = "http://fake.to:2/fake" mock_neutron = mock.MagicMock() mock_keystoneauth1 = mock.MagicMock() mock_neutron.client.Client.return_value = fake_neutron self.assertNotIn("neutron", self.clients.cache) with mock.patch.dict("sys.modules", {"neutronclient.neutron": mock_neutron, "keystoneauth1": mock_keystoneauth1}): client = self.clients.neutron() self.assertEqual(fake_neutron, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_neutron__get_endpoint.return_value} mock_neutron.client.Client.assert_called_once_with("2.0", **kw) self.assertEqual(fake_neutron, self.clients.cache["neutron"]) @mock.patch("%s.Neutron._get_endpoint" % PATH) def test_neutron_endpoint_type(self, mock_neutron__get_endpoint): fake_neutron = fakes.FakeNeutronClient() mock_neutron__get_endpoint.return_value = "http://fake.to:2/fake" mock_neutron = mock.MagicMock() mock_keystoneauth1 = mock.MagicMock() mock_neutron.client.Client.return_value = fake_neutron self.assertNotIn("neutron", self.clients.cache) self.credential["endpoint_type"] = "internal" with mock.patch.dict("sys.modules", {"neutronclient.neutron": mock_neutron, "keystoneauth1": mock_keystoneauth1}): client = self.clients.neutron() self.assertEqual(fake_neutron, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_neutron__get_endpoint.return_value, "endpoint_type": "internal"} mock_neutron.client.Client.assert_called_once_with("2.0", **kw) self.assertEqual(fake_neutron, 
self.clients.cache["neutron"]) @mock.patch("%s.Octavia._get_endpoint" % PATH) def test_octavia(self, mock_octavia__get_endpoint): fake_octavia = fakes.FakeOctaviaClient() mock_octavia__get_endpoint.return_value = "http://fake.to:2/fake" mock_octavia = mock.MagicMock() mock_keystoneauth1 = mock.MagicMock() mock_octavia.octavia.OctaviaAPI.return_value = fake_octavia self.assertNotIn("octavia", self.clients.cache) with mock.patch.dict("sys.modules", {"octaviaclient.api.v2": mock_octavia, "keystoneauth1": mock_keystoneauth1}): client = self.clients.octavia() self.assertEqual(fake_octavia, client) kw = {"endpoint": mock_octavia__get_endpoint.return_value, "session": mock_keystoneauth1.session.Session()} mock_octavia.octavia.OctaviaAPI.assert_called_once_with(**kw) self.assertEqual(fake_octavia, self.clients.cache["octavia"]) @mock.patch("%s.Heat._get_endpoint" % PATH) def test_heat(self, mock_heat__get_endpoint): fake_heat = fakes.FakeHeatClient() mock_heat__get_endpoint.return_value = "http://fake.to:2/fake" mock_heat = mock.MagicMock() mock_keystoneauth1 = mock.MagicMock() mock_heat.client.Client.return_value = fake_heat self.assertNotIn("heat", self.clients.cache) with mock.patch.dict("sys.modules", {"heatclient": mock_heat, "keystoneauth1": mock_keystoneauth1}): client = self.clients.heat() self.assertEqual(fake_heat, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_heat__get_endpoint.return_value} mock_heat.client.Client.assert_called_once_with("1", **kw) self.assertEqual(fake_heat, self.clients.cache["heat"]) @mock.patch("%s.Heat._get_endpoint" % PATH) def test_heat_endpoint_type_interface(self, mock_heat__get_endpoint): fake_heat = fakes.FakeHeatClient() mock_heat__get_endpoint.return_value = "http://fake.to:2/fake" mock_heat = mock.MagicMock() mock_keystoneauth1 = mock.MagicMock() mock_heat.client.Client.return_value = fake_heat self.assertNotIn("heat", self.clients.cache) self.credential["endpoint_type"] = "internal" 
with mock.patch.dict("sys.modules", {"heatclient": mock_heat, "keystoneauth1": mock_keystoneauth1}): client = self.clients.heat() self.assertEqual(fake_heat, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_heat__get_endpoint.return_value, "interface": "internal"} mock_heat.client.Client.assert_called_once_with("1", **kw) self.assertEqual(fake_heat, self.clients.cache["heat"]) @mock.patch("%s.Glance._get_endpoint" % PATH) def test_glance(self, mock_glance__get_endpoint): fake_glance = fakes.FakeGlanceClient() mock_glance = mock.MagicMock() mock_glance__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() mock_glance.Client = mock.MagicMock(return_value=fake_glance) with mock.patch.dict("sys.modules", {"glanceclient": mock_glance, "keystoneauth1": mock_keystoneauth1}): self.assertNotIn("glance", self.clients.cache) client = self.clients.glance() self.assertEqual(fake_glance, client) kw = { "version": "2", "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_glance__get_endpoint.return_value} mock_glance.Client.assert_called_once_with(**kw) self.assertEqual(fake_glance, self.clients.cache["glance"]) @mock.patch("%s.Cinder._get_endpoint" % PATH) def test_cinder(self, mock_cinder__get_endpoint): fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient()) mock_cinder = mock.MagicMock() mock_cinder.client.Client.return_value = fake_cinder mock_cinder__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("cinder", self.clients.cache) with mock.patch.dict("sys.modules", {"cinderclient": mock_cinder, "keystoneauth1": mock_keystoneauth1}): client = self.clients.cinder() self.assertEqual(fake_cinder, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint_override": mock_cinder__get_endpoint.return_value} mock_cinder.client.Client.assert_called_once_with( "3", **kw) self.assertEqual(fake_cinder, 
self.clients.cache["cinder"]) @mock.patch("%s.Manila._get_endpoint" % PATH) def test_manila(self, mock_manila__get_endpoint): mock_manila = mock.MagicMock() mock_manila__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("manila", self.clients.cache) with mock.patch.dict("sys.modules", {"manilaclient": mock_manila, "keystoneauth1": mock_keystoneauth1}): client = self.clients.manila() self.assertEqual(mock_manila.client.Client.return_value, client) kw = { "insecure": False, "session": mock_keystoneauth1.session.Session(), "service_catalog_url": mock_manila__get_endpoint.return_value } mock_manila.client.Client.assert_called_once_with("1", **kw) self.assertEqual( mock_manila.client.Client.return_value, self.clients.cache["manila"]) def test_manila_validate_version(self): osclients.Manila.validate_version("2.0") osclients.Manila.validate_version("2.32") self.assertRaises(exceptions.RallyException, osclients.Manila.validate_version, "foo") def test_gnocchi(self): fake_gnocchi = fakes.FakeGnocchiClient() mock_gnocchi = mock.MagicMock() mock_gnocchi.client.Client.return_value = fake_gnocchi mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("gnocchi", self.clients.cache) self.credential["endpoint_type"] = "internal" with mock.patch.dict("sys.modules", {"gnocchiclient": mock_gnocchi, "keystoneauth1": mock_keystoneauth1}): mock_keystoneauth1.discover.Discover.return_value = ( mock.Mock(version_data=mock.Mock(return_value=[ {"version": (1, 0)}])) ) client = self.clients.gnocchi() self.assertEqual(fake_gnocchi, client) kw = {"version": "1", "session": mock_keystoneauth1.session.Session(), "adapter_options": {"service_type": "metric", "interface": "internal"}} mock_gnocchi.client.Client.assert_called_once_with(**kw) self.assertEqual(fake_gnocchi, self.clients.cache["gnocchi"]) def test_monasca(self): fake_monasca = fakes.FakeMonascaClient() mock_monasca = mock.MagicMock() mock_monasca.client.Client.return_value = 
fake_monasca self.assertNotIn("monasca", self.clients.cache) with mock.patch.dict("sys.modules", {"monascaclient": mock_monasca}): client = self.clients.monasca() self.assertEqual(fake_monasca, client) self.service_catalog.url_for.assert_called_once_with( service_type="monitoring", region_name=self.credential.region_name) os_endpoint = self.service_catalog.url_for.return_value kw = {"token": self.auth_ref.auth_token, "timeout": cfg.CONF.openstack_client_http_timeout, "insecure": False, "cacert": None, "username": self.credential.username, "password": self.credential.password, "tenant_name": self.credential.tenant_name, "auth_url": self.credential.auth_url } mock_monasca.client.Client.assert_called_once_with("2_0", os_endpoint, **kw) self.assertEqual(mock_monasca.client.Client.return_value, self.clients.cache["monasca"]) @mock.patch("%s.Ironic._get_endpoint" % PATH) def test_ironic(self, mock_ironic__get_endpoint): fake_ironic = fakes.FakeIronicClient() mock_ironic = mock.MagicMock() mock_ironic.client.get_client = mock.MagicMock( return_value=fake_ironic) mock_ironic__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("ironic", self.clients.cache) with mock.patch.dict("sys.modules", {"ironicclient": mock_ironic, "keystoneauth1": mock_keystoneauth1}): client = self.clients.ironic() self.assertEqual(fake_ironic, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint": mock_ironic__get_endpoint.return_value} mock_ironic.client.get_client.assert_called_once_with("1", **kw) self.assertEqual(fake_ironic, self.clients.cache["ironic"]) @mock.patch("%s.Sahara._get_endpoint" % PATH) def test_sahara(self, mock_sahara__get_endpoint): fake_sahara = fakes.FakeSaharaClient() mock_sahara = mock.MagicMock() mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara) mock_sahara__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("sahara", 
self.clients.cache) with mock.patch.dict("sys.modules", {"saharaclient": mock_sahara, "keystoneauth1": mock_keystoneauth1}): client = self.clients.sahara() self.assertEqual(fake_sahara, client) kw = { "session": mock_keystoneauth1.session.Session(), "sahara_url": mock_sahara__get_endpoint.return_value} mock_sahara.client.Client.assert_called_once_with(1.1, **kw) self.assertEqual(fake_sahara, self.clients.cache["sahara"]) def test_zaqar(self): fake_zaqar = fakes.FakeZaqarClient() mock_zaqar = mock.MagicMock() mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar) self.assertNotIn("zaqar", self.clients.cache) mock_keystoneauth1 = mock.MagicMock() with mock.patch.dict("sys.modules", {"zaqarclient.queues": mock_zaqar, "keystoneauth1": mock_keystoneauth1}): client = self.clients.zaqar() self.assertEqual(fake_zaqar, client) self.service_catalog.url_for.assert_called_once_with( service_type="messaging", region_name=self.credential.region_name) fake_zaqar_url = self.service_catalog.url_for.return_value mock_zaqar.client.Client.assert_called_once_with( url=fake_zaqar_url, version=1.1, session=mock_keystoneauth1.session.Session()) self.assertEqual(fake_zaqar, self.clients.cache["zaqar"], mock_keystoneauth1.session.Session()) @mock.patch("%s.Trove._get_endpoint" % PATH) def test_trove(self, mock_trove__get_endpoint): fake_trove = fakes.FakeTroveClient() mock_trove = mock.MagicMock() mock_trove.client.Client = mock.MagicMock(return_value=fake_trove) mock_trove__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("trove", self.clients.cache) with mock.patch.dict("sys.modules", {"troveclient": mock_trove, "keystoneauth1": mock_keystoneauth1}): client = self.clients.trove() self.assertEqual(fake_trove, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint": mock_trove__get_endpoint.return_value} mock_trove.client.Client.assert_called_once_with("1.0", **kw) self.assertEqual(fake_trove, 
self.clients.cache["trove"]) def test_mistral(self): fake_mistral = fakes.FakeMistralClient() mock_mistral = mock.Mock() mock_mistral.client.client.return_value = fake_mistral self.assertNotIn("mistral", self.clients.cache) with mock.patch.dict( "sys.modules", {"mistralclient": mock_mistral, "mistralclient.api": mock_mistral}): client = self.clients.mistral() self.assertEqual(fake_mistral, client) self.service_catalog.url_for.assert_called_once_with( service_type="workflowv2", region_name=self.credential.region_name ) fake_mistral_url = self.service_catalog.url_for.return_value mock_mistral.client.client.assert_called_once_with( mistral_url=fake_mistral_url, service_type="workflowv2", auth_token=self.auth_ref.auth_token ) self.assertEqual(fake_mistral, self.clients.cache["mistral"]) def test_swift(self): fake_swift = fakes.FakeSwiftClient() mock_swift = mock.MagicMock() mock_swift.client.Connection = mock.MagicMock(return_value=fake_swift) self.assertNotIn("swift", self.clients.cache) with mock.patch.dict("sys.modules", {"swiftclient": mock_swift}): client = self.clients.swift() self.assertEqual(fake_swift, client) self.service_catalog.url_for.assert_called_once_with( service_type="object-store", region_name=self.credential.region_name) kw = {"retries": 1, "preauthurl": self.service_catalog.url_for.return_value, "preauthtoken": self.auth_ref.auth_token, "insecure": False, "cacert": None, "user": self.credential.username, "tenant_name": self.credential.tenant_name, } mock_swift.client.Connection.assert_called_once_with(**kw) self.assertEqual(fake_swift, self.clients.cache["swift"]) @mock.patch("%s.Keystone.service_catalog" % PATH) def test_services(self, mock_keystone_service_catalog): available_services = {consts.ServiceType.IDENTITY: {}, consts.ServiceType.COMPUTE: {}, "some_service": {}} mock_get_endpoints = mock_keystone_service_catalog.get_endpoints mock_get_endpoints.return_value = available_services clients = osclients.Clients(self.credential) 
self.assertEqual( {consts.ServiceType.IDENTITY: consts.Service.KEYSTONE, consts.ServiceType.COMPUTE: consts.Service.NOVA, "some_service": "__unknown__"}, clients.services()) def test_murano(self): fake_murano = fakes.FakeMuranoClient() mock_murano = mock.Mock() mock_murano.client.Client.return_value = fake_murano self.assertNotIn("murano", self.clients.cache) with mock.patch.dict("sys.modules", {"muranoclient": mock_murano}): client = self.clients.murano() self.assertEqual(fake_murano, client) self.service_catalog.url_for.assert_called_once_with( service_type="application-catalog", region_name=self.credential.region_name ) kw = {"endpoint": self.service_catalog.url_for.return_value, "token": self.auth_ref.auth_token} mock_murano.client.Client.assert_called_once_with("1", **kw) self.assertEqual(fake_murano, self.clients.cache["murano"]) @mock.patch("%s.Keystone.get_session" % PATH) @ddt.data( {}, {"version": "2"}, {"version": None} ) @ddt.unpack def test_designate(self, mock_keystone_get_session, version=None): fake_designate = fakes.FakeDesignateClient() mock_designate = mock.Mock() mock_designate.client.Client.return_value = fake_designate mock_keystone_get_session.return_value = ("fake_session", "fake_auth_plugin") self.assertNotIn("designate", self.clients.cache) with mock.patch.dict("sys.modules", {"designateclient": mock_designate}): if version is not None: client = self.clients.designate(version=version) else: client = self.clients.designate() self.assertEqual(fake_designate, client) self.service_catalog.url_for.assert_called_once_with( service_type="dns", region_name=self.credential.region_name ) default = version or "2" # Check that we append /v<version> url = self.service_catalog.url_for.return_value url.__iadd__.assert_called_once_with("/v%s" % default) mock_keystone_get_session.assert_called_once_with() mock_designate.client.Client.assert_called_once_with( default, endpoint_override=url.__iadd__.return_value, session="fake_session") key = "designate" if 
version is not None: key += "%s" % {"version": version} self.assertEqual(fake_designate, self.clients.cache[key]) def test_senlin(self): mock_senlin = mock.MagicMock() self.assertNotIn("senlin", self.clients.cache) with mock.patch.dict("sys.modules", {"senlinclient": mock_senlin}): client = self.clients.senlin() self.assertEqual(mock_senlin.client.Client.return_value, client) mock_senlin.client.Client.assert_called_once_with( "1", username=self.credential.username, password=self.credential.password, project_name=self.credential.tenant_name, cert=self.credential.cacert, auth_url=self.credential.auth_url) self.assertEqual( mock_senlin.client.Client.return_value, self.clients.cache["senlin"]) @mock.patch("%s.Magnum._get_endpoint" % PATH) def test_magnum(self, mock_magnum__get_endpoint): fake_magnum = fakes.FakeMagnumClient() mock_magnum = mock.MagicMock() mock_magnum.client.Client.return_value = fake_magnum mock_magnum__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() self.assertNotIn("magnum", self.clients.cache) with mock.patch.dict("sys.modules", {"magnumclient": mock_magnum, "keystoneauth1": mock_keystoneauth1}): client = self.clients.magnum() self.assertEqual(fake_magnum, client) kw = { "interface": self.credential.endpoint_type, "session": mock_keystoneauth1.session.Session(), "magnum_url": mock_magnum__get_endpoint.return_value} mock_magnum.client.Client.assert_called_once_with(**kw) self.assertEqual(fake_magnum, self.clients.cache["magnum"]) @mock.patch("%s.Watcher._get_endpoint" % PATH) def test_watcher(self, mock_watcher__get_endpoint): fake_watcher = fakes.FakeWatcherClient() mock_watcher = mock.MagicMock() mock_watcher__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() mock_watcher.client.Client.return_value = fake_watcher self.assertNotIn("watcher", self.clients.cache) with mock.patch.dict("sys.modules", {"watcherclient": mock_watcher, "keystoneauth1": mock_keystoneauth1}): 
client = self.clients.watcher() self.assertEqual(fake_watcher, client) kw = { "session": mock_keystoneauth1.session.Session(), "endpoint": mock_watcher__get_endpoint.return_value} mock_watcher.client.Client.assert_called_once_with("1", **kw) self.assertEqual(fake_watcher, self.clients.cache["watcher"]) @mock.patch("%s.Barbican._get_endpoint" % PATH) def test_barbican(self, mock_barbican__get_endpoint): fake_barbican = fakes.FakeBarbicanClient() mock_barbican = mock.MagicMock() mock_barbican__get_endpoint.return_value = "http://fake.to:2/fake" mock_keystoneauth1 = mock.MagicMock() mock_barbican.client.Client.return_value = fake_barbican with mock.patch.dict("sys.modules", {"barbicanclient": mock_barbican, "keystoneauth1": mock_keystoneauth1}): client = self.clients.barbican() self.assertEqual(fake_barbican, client) kw = { "session": mock_keystoneauth1.session.Session(), "version": "v1" } mock_barbican.client.Client.assert_called_once_with(**kw) self.assertEqual(fake_barbican, self.clients.cache["barbican"]) class AuthenticationFailedTestCase(test.TestCase): def test_init(self): from keystoneauth1 import exceptions as ks_exc actual_exc = ks_exc.ConnectionError("Something") exc = osclients.AuthenticationFailed( error=actual_exc, url="https://example.com", username="user", project="project") # only original exc should be used self.assertEqual("Something", exc.format_message()) actual_exc = Exception("Something") exc = osclients.AuthenticationFailed( error=actual_exc, url="https://example.com", username="user", project="project") # additional info should be added self.assertEqual("Failed to authenticate to https://example.com for " "user 'user' in project 'project': " "[Exception] Something", exc.format_message()) # check cutting message actual_exc = ks_exc.DiscoveryFailure( "Could not find versioned identity endpoints when attempting to " "authenticate. Please check that your auth_url is correct. 
" "Unable to establish connection to https://example.com: " "HTTPConnectionPool(host='example.com', port=80): Max retries " "exceeded with url: / (Caused by NewConnectionError('" "<urllib3.connection.HTTPConnection object at 0x7f32ab9809d0>: " "Failed to establish a new connection: [Errno -2] Name or service" " not known',))") exc = osclients.AuthenticationFailed( error=actual_exc, url="https://example.com", username="user", project="project") # original message should be simplified self.assertEqual( "Could not find versioned identity endpoints when attempting to " "authenticate. Please check that your auth_url is correct. " "Unable to establish connection to https://example.com", exc.format_message())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,771
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/nova/aggregates.py
# Copyright 2016 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally import exceptions from rally.task import types from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.nova import utils """Scenarios for Nova aggregates.""" @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(name="NovaAggregates.list_aggregates", platform="openstack") class ListAggregates(utils.NovaScenario): def run(self): """List all nova aggregates. Measure the "nova aggregate-list" command performance. """ self._list_aggregates() @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, name="NovaAggregates.create_and_list_aggregates", platform="openstack") class CreateAndListAggregates(utils.NovaScenario): """scenario for create and list aggregate.""" def run(self, availability_zone): """Create a aggregate and then list all aggregates. This scenario creates a aggregate and then lists all aggregates. 
:param availability_zone: The availability zone of the aggregate """ aggregate = self._create_aggregate(availability_zone) msg = "Aggregate isn't created" self.assertTrue(aggregate, err_msg=msg) all_aggregates = self._list_aggregates() msg = ("Created aggregate is not in the" " list of all available aggregates") self.assertIn(aggregate, all_aggregates, err_msg=msg) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, name="NovaAggregates.create_and_delete_aggregate", platform="openstack") class CreateAndDeleteAggregate(utils.NovaScenario): """Scenario for create and delete aggregate.""" def run(self, availability_zone): """Create an aggregate and then delete it. This scenario first creates an aggregate and then delete it. :param availability_zone: The availability zone of the aggregate """ aggregate = self._create_aggregate(availability_zone) self._delete_aggregate(aggregate) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, name="NovaAggregates.create_and_update_aggregate", platform="openstack") class CreateAndUpdateAggregate(utils.NovaScenario): """Scenario for create and update aggregate.""" def run(self, availability_zone): """Create an aggregate and then update its name and availability_zone This scenario first creates an aggregate and then update its name and availability_zone :param availability_zone: The availability zone of the aggregate """ aggregate = self._create_aggregate(availability_zone) self._update_aggregate(aggregate) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, 
name="NovaAggregates.create_aggregate_add_and_remove_host", platform="openstack") class CreateAggregateAddAndRemoveHost(utils.NovaScenario): """Scenario for add a host to and remove the host from an aggregate.""" def run(self, availability_zone): """Create an aggregate, add a host to and remove the host from it Measure "nova aggregate-add-host" and "nova aggregate-remove-host" command performance. :param availability_zone: The availability zone of the aggregate """ aggregate = self._create_aggregate(availability_zone) hosts = self._list_hypervisors() host_name = hosts[0].service["host"] self._aggregate_add_host(aggregate, host_name) self._aggregate_remove_host(aggregate, host_name) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"admin_cleanup@openstack": ["nova"]}, name="NovaAggregates.create_and_get_aggregate_details", platform="openstack") class CreateAndGetAggregateDetails(utils.NovaScenario): """Scenario for create and get aggregate details.""" def run(self, availability_zone): """Create an aggregate and then get its details. This scenario first creates an aggregate and then get details of it. 
:param availability_zone: The availability zone of the aggregate """ aggregate = self._create_aggregate(availability_zone) self._get_aggregate_details(aggregate) @types.convert(image={"type": "glance_image"}) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure( context={"admin_cleanup@openstack": ["nova"], "cleanup@openstack": ["nova"]}, name="NovaAggregates.create_aggregate_add_host_and_boot_server", platform="openstack") class CreateAggregateAddHostAndBootServer(utils.NovaScenario): """Scenario to verify an aggregate.""" def run(self, image, metadata, availability_zone=None, ram=512, vcpus=1, disk=1, boot_server_kwargs=None): """Scenario to create and verify an aggregate This scenario creates an aggregate, adds a compute host and metadata to the aggregate, adds the same metadata to the flavor and creates an instance. Verifies that instance host is one of the hosts in the aggregate. 
:param image: The image ID to boot from :param metadata: The metadata to be set as flavor extra specs :param availability_zone: The availability zone of the aggregate :param ram: Memory in MB for the flavor :param vcpus: Number of VCPUs for the flavor :param disk: Size of local disk in GB :param boot_server_kwargs: Optional additional arguments to verify host aggregates :raises RallyException: if instance and aggregate hosts do not match """ boot_server_kwargs = boot_server_kwargs or {} aggregate = self._create_aggregate(availability_zone) hosts = self._list_hypervisors() host_name = None for i in range(len(hosts)): if hosts[i].state == "up" and hosts[i].status == "enabled": host_name = hosts[i].service["host"] break if not host_name: raise exceptions.RallyException("Could not find an available host") self._aggregate_set_metadata(aggregate, metadata) self._aggregate_add_host(aggregate, host_name) flavor = self._create_flavor(ram, vcpus, disk) flavor.set_keys(metadata) server = self._boot_server(image, flavor.id, **boot_server_kwargs) # NOTE: we need to get server object by admin user to obtain # "hypervisor_hostname" attribute server = self.admin_clients("nova").servers.get(server.id) instance_hostname = getattr(server, "OS-EXT-SRV-ATTR:hypervisor_hostname") if instance_hostname != host_name: raise exceptions.RallyException("Instance host and aggregate " "host are different")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,772
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/senlin/clusters.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.senlin import utils """Scenarios for Senlin clusters.""" @validation.add("required_platform", platform="openstack", admin=True) @validation.add("required_services", services=[consts.Service.SENLIN]) @validation.add("required_contexts", contexts=("profiles")) @scenario.configure(context={"admin_cleanup@openstack": ["senlin"]}, name="SenlinClusters.create_and_delete_cluster", platform="openstack") class CreateAndDeleteCluster(utils.SenlinScenario): def run(self, desired_capacity=0, min_size=0, max_size=-1, timeout=3600, metadata=None): """Create a cluster and then delete it. Measure the "senlin cluster-create" and "senlin cluster-delete" commands performance. :param desired_capacity: The capacity or initial number of nodes owned by the cluster :param min_size: The minimum number of nodes owned by the cluster :param max_size: The maximum number of nodes owned by the cluster. -1 means no limit :param timeout: The timeout value in seconds for cluster creation :param metadata: A set of key value pairs to associate with the cluster """ profile_id = self.context["tenant"]["profile"] cluster = self._create_cluster(profile_id, desired_capacity, min_size, max_size, timeout, metadata) self._delete_cluster(cluster)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,773
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/barbican/test_orders.py
# Copyright 2018 Red Hat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.task.scenarios.barbican import orders from tests.unit import test class BarbicanOrdersTestCase(test.ScenarioTestCase): def get_test_context(self): context = super(BarbicanOrdersTestCase, self).get_test_context() context.update({ "admin": { "user_id": "fake", "credential": mock.MagicMock() }, "user": { "user_id": "fake", "credential": mock.MagicMock() }, "tenant": {"id": "fake"} }) return context def setUp(self): super(BarbicanOrdersTestCase, self).setUp() m = "rally_openstack.common.services.key_manager.barbican" patch = mock.patch("%s.BarbicanService" % m) self.addCleanup(patch.stop) self.mock_secrets = patch.start() def test_list_orders(self): barbican_service = self.mock_secrets.return_value scenario = orders.BarbicanOrdersList(self.context) scenario.run() barbican_service.orders_list.assert_called_once_with() def test_key_create_and_delete(self): keys = {"order_ref": "fake-key"} barbican_service = self.mock_secrets.return_value scenario = orders.BarbicanOrdersCreateKeyAndDelete(self.context) scenario.run() keys = barbican_service.create_key.return_value barbican_service.create_key.assert_called_once_with() barbican_service.orders_delete.assert_called_once_with( keys.order_ref) def test_certificate_create_and_delete(self): certificate = {"order_ref": "fake-certificate"} barbican_service = self.mock_secrets.return_value scenario = 
orders.BarbicanOrdersCreateCertificateAndDelete( self.context) scenario.run() certificate = barbican_service.create_certificate.return_value barbican_service.create_certificate.assert_called_once_with() barbican_service.orders_delete.assert_called_once_with( certificate.order_ref) def test_asymmetric_create_and_delete(self): certificate = {"order_ref": "fake-certificate"} barbican_service = self.mock_secrets.return_value scenario = orders.BarbicanOrdersCreateAsymmetricAndDelete( self.context) scenario.run() certificate = barbican_service.create_asymmetric.return_value barbican_service.create_asymmetric.assert_called_once_with() barbican_service.orders_delete.assert_called_once_with( certificate.order_ref)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,774
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/elasticsearch/logging.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import requests from rally.common import cfg from rally.common import logging from rally.common import utils as commonutils from rally.task import atomic from rally.task import types from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.nova import utils as nova_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) """Scenario for Elasticsearch logging system.""" @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="ElasticsearchLogging.log_instance", platform="openstack") class ElasticsearchLogInstanceName(nova_utils.NovaScenario): """Test logging instance in conjunction with Elasticsearch system. Let OpenStack platform already has logging agent (for example, Filebeat), which sends nova logs to Elasticsearch through data processing pipeline (e.g. Logstash). The test verifies Openstack nova logs stored in logging system. It creates nova instance with random name and after instance becomes available, checks it's name in Elasticsearch indices by querying. 
""" @atomic.action_timer("elasticsearch.check_server_log_indexed") def _check_server_name(self, server_id, logging_vip, elasticsearch_port, sleep_time, retries_total, additional_query=None): request_data = { "query": { "bool": { "must": [{"match_phrase": {"Payload": server_id}}] } } } if additional_query: request_data["query"]["bool"].update(additional_query) LOG.info("Check server ID %s in elasticsearch" % server_id) i = 0 while i < retries_total: LOG.debug("Attempt number %s" % (i + 1)) resp = requests.get("http://%(ip)s:%(port)s/_search" % { "ip": logging_vip, "port": elasticsearch_port}, data=json.dumps(request_data)) result = resp.json() if result["hits"]["total"] < 1 and i + 1 >= retries_total: LOG.debug("No instance data found in Elasticsearch") self.assertGreater(result["hits"]["total"], 0) elif result["hits"]["total"] < 1: i += 1 commonutils.interruptable_sleep(sleep_time) else: LOG.debug("Instance data found in Elasticsearch") self.assertGreater(result["hits"]["total"], 0) break def run(self, image, flavor, logging_vip, elasticsearch_port, sleep_time=5, retries_total=30, boot_server_kwargs=None, force_delete=False, query_by_name=False, additional_query=None): """Create nova instance and check it indexed in elasticsearch. :param image: image for server :param flavor: flavor for server :param logging_vip: logging system IP to check server name in elasticsearch index :param boot_server_kwargs: special server kwargs for boot :param force_delete: force delete server or not :param elasticsearch_port: elasticsearch port to use for check server :param additional_query: map of additional arguments for scenario elasticsearch query to check nova info in els index. 
:param query_by_name: query nova server by name if True otherwise by id :param sleep_time: sleep time in seconds between elasticsearch request :param retries_total: total number of retries to check server name in elasticsearch """ server = self._boot_server(image, flavor, **(boot_server_kwargs or {})) if query_by_name: server_id = server.name else: server_id = server.id self._check_server_name(server_id, logging_vip, elasticsearch_port, sleep_time, retries_total, additional_query=additional_query) self._delete_server(server, force=force_delete)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,775
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/nova/test_servers.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock from rally_openstack.task.contexts.nova import servers from rally_openstack.task.scenarios.nova import utils as nova_utils from tests.unit import fakes from tests.unit import test CTX = "rally_openstack.task.contexts.nova" SCN = "rally_openstack.task.scenarios" TYP = "rally_openstack.task.types" class ServerGeneratorTestCase(test.ScenarioTestCase): def _gen_tenants(self, count): tenants = {} for id_ in range(count): tenants[str(id_)] = {"name": str(id_)} return tenants def test_init(self): tenants_count = 2 servers_per_tenant = 5 self.context.update({ "config": { "servers": { "servers_per_tenant": servers_per_tenant, } }, "tenants": self._gen_tenants(tenants_count)}) inst = servers.ServerGenerator(self.context) self.assertEqual({"auto_assign_nic": False, "servers_per_tenant": 5}, inst.config) @mock.patch("%s.nova.utils.NovaScenario._boot_servers" % SCN, return_value=[ fakes.FakeServer(id="uuid"), fakes.FakeServer(id="uuid"), fakes.FakeServer(id="uuid"), fakes.FakeServer(id="uuid"), fakes.FakeServer(id="uuid") ]) @mock.patch("%s.GlanceImage" % TYP) @mock.patch("%s.Flavor" % TYP) def test_setup(self, mock_flavor, mock_glance_image, mock_nova_scenario__boot_servers): tenants_count = 2 users_per_tenant = 5 servers_per_tenant = 5 tenants = self._gen_tenants(tenants_count) users = [] for id_ in tenants.keys(): for i in range(users_per_tenant): users.append({"id": i, "tenant_id": 
id_, "credential": mock.MagicMock()}) self.context.update({ "config": { "users": { "tenants": 2, "users_per_tenant": 5, "concurrent": 10, }, "servers": { "auto_assign_nic": True, "servers_per_tenant": 5, "image": { "name": "cirros-0.5.2-x86_64-uec", }, "flavor": { "name": "m1.tiny", }, "nics": ["foo", "bar"] }, }, "admin": { "credential": mock.MagicMock() }, "users": users, "tenants": tenants }) new_context = copy.deepcopy(self.context) for id_ in new_context["tenants"]: new_context["tenants"][id_].setdefault("servers", []) for i in range(servers_per_tenant): new_context["tenants"][id_]["servers"].append("uuid") servers_ctx = servers.ServerGenerator(self.context) servers_ctx.setup() self.assertEqual(new_context, self.context) image_id = mock_glance_image.return_value.pre_process.return_value flavor_id = mock_flavor.return_value.pre_process.return_value servers_ctx_config = self.context["config"]["servers"] expected_auto_nic = servers_ctx_config.get("auto_assign_nic", False) expected_requests = servers_ctx_config.get("servers_per_tenant", False) called_times = len(tenants) mock_calls = [mock.call(image_id, flavor_id, auto_assign_nic=expected_auto_nic, nics=[{"net-id": "foo"}, {"net-id": "bar"}], requests=expected_requests) for i in range(called_times)] mock_nova_scenario__boot_servers.assert_has_calls(mock_calls) @mock.patch("%s.servers.resource_manager.cleanup" % CTX) def test_cleanup(self, mock_cleanup): tenants_count = 2 users_per_tenant = 5 servers_per_tenant = 5 tenants = self._gen_tenants(tenants_count) users = [] for id_ in tenants.keys(): for i in range(users_per_tenant): users.append({"id": i, "tenant_id": id_, "credential": "credential"}) tenants[id_].setdefault("servers", []) for j in range(servers_per_tenant): tenants[id_]["servers"].append("uuid") self.context.update({ "config": { "users": { "tenants": 2, "users_per_tenant": 5, "concurrent": 10, }, "servers": { "servers_per_tenant": 5, "image": { "name": "cirros-0.5.2-x86_64-uec", }, "flavor": { "name": 
"m1.tiny", }, }, }, "admin": { "credential": mock.MagicMock() }, "users": users, "tenants": tenants }) servers_ctx = servers.ServerGenerator(self.context) servers_ctx.cleanup() mock_cleanup.assert_called_once_with( names=["nova.servers"], users=self.context["users"], superclass=nova_utils.NovaScenario, task_id=self.context["owner_id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,776
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/quotas/test_neutron_quotas.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.task.contexts.quotas import neutron_quotas from tests.unit import test class NeutronQuotasTestCase(test.TestCase): def setUp(self): super(NeutronQuotasTestCase, self).setUp() self.quotas = { "network": 20, "subnet": 20, "port": 100, "router": 20, "floatingip": 100, "security_group": 100, "security_group_rule": 100 } def test_update(self): clients = mock.MagicMock() neutron_quo = neutron_quotas.NeutronQuotas(clients) tenant_id = mock.MagicMock() neutron_quo.update(tenant_id, **self.quotas) body = {"quota": self.quotas} clients.neutron().update_quota.assert_called_once_with(tenant_id, body=body) def test_delete(self): clients = mock.MagicMock() neutron_quo = neutron_quotas.NeutronQuotas(clients) tenant_id = mock.MagicMock() neutron_quo.delete(tenant_id) clients.neutron().delete_quota.assert_called_once_with(tenant_id) def test_get(self): tenant_id = "tenant_id" clients = mock.MagicMock() clients.neutron.return_value.show_quota.return_value = { "quota": self.quotas} neutron_quo = neutron_quotas.NeutronQuotas(clients) self.assertEqual(self.quotas, neutron_quo.get(tenant_id)) clients.neutron().show_quota.assert_called_once_with(tenant_id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,777
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/cleanup/test_admin.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import ddt
from rally.common import utils
from rally.task import context

from rally_openstack.task.contexts.cleanup import admin
from rally_openstack.task import scenario
from tests.unit import test

ADMIN = "rally_openstack.task.contexts.cleanup.admin"
BASE = "rally_openstack.task.contexts.cleanup.base"


@ddt.ddt
class AdminCleanupTestCase(test.TestCase):
    """Unit tests for the admin cleanup context."""

    @mock.patch("%s.manager" % BASE)
    @ddt.data((["a", "b"], True), (["a", "e"], False), (3, False))
    @ddt.unpack
    def test_validate(self, config, valid, mock_manager):
        mock_manager.list_resource_names.return_value = {"a", "b", "c"}

        results = context.Context.validate(
            "admin_cleanup", None, None, config, allow_hidden=True)

        # A valid config must produce no validation messages at all;
        # unknown resource names or a non-list config must produce some.
        if valid:
            self.assertEqual([], results)
        else:
            self.assertGreater(len(results), 0)

    @mock.patch("rally.common.plugin.discover.itersubclasses")
    @mock.patch("%s.manager.find_resource_managers" % ADMIN,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    @mock.patch("%s.manager.SeekAndDestroy" % ADMIN)
    def test_cleanup(self, mock_seek_and_destroy,
                     mock_find_resource_managers, mock_itersubclasses):

        class ResourceClass(utils.RandomNameGeneratorMixin):
            pass

        mock_itersubclasses.return_value = [ResourceClass]

        ctx = {
            "config": {"admin_cleanup": ["a", "b"]},
            "admin": mock.MagicMock(),
            "users": mock.MagicMock(),
            "task": {"uuid": "task_id"}
        }

        admin_cleanup = admin.AdminCleanup(ctx)
        admin_cleanup.setup()
        admin_cleanup.cleanup()

        mock_itersubclasses.assert_called_once_with(scenario.OpenStackScenario)
        mock_find_resource_managers.assert_called_once_with(("a", "b"), True)

        # Every discovered resource manager must be exterminated with the
        # same admin/users credentials and the task id of this run.
        expected_calls = []
        for res_manager in mock_find_resource_managers.return_value:
            expected_calls.append(
                mock.call(res_manager, ctx["admin"], ctx["users"],
                          resource_classes=[ResourceClass],
                          task_id="task_id"))
            expected_calls.append(mock.call().exterminate())
        mock_seek_and_destroy.assert_has_calls(expected_calls)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,778
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/cfg/opts.py
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally_openstack.common.cfg import cinder
from rally_openstack.common.cfg import glance
from rally_openstack.common.cfg import heat
from rally_openstack.common.cfg import ironic
from rally_openstack.common.cfg import magnum
from rally_openstack.common.cfg import manila
from rally_openstack.common.cfg import mistral
from rally_openstack.common.cfg import monasca
from rally_openstack.common.cfg import murano
from rally_openstack.common.cfg import neutron
from rally_openstack.common.cfg import nova
from rally_openstack.common.cfg import octavia
from rally_openstack.common.cfg import osclients
from rally_openstack.common.cfg import profiler
from rally_openstack.common.cfg import sahara
from rally_openstack.common.cfg import senlin
from rally_openstack.common.cfg import vm
from rally_openstack.common.cfg import watcher
from rally_openstack.common.cfg import tempest
from rally_openstack.common.cfg import keystone_roles
from rally_openstack.common.cfg import keystone_users
from rally_openstack.common.cfg import cleanup

from rally_openstack.task.ui.charts import osprofilerchart


def list_opts():
    """Merge every service's registered options into one option listing.

    :returns: a list of ``(category, options)`` tuples where each category
        appears exactly once and its options are the concatenation of the
        options every service module registered under that category.
    """
    opts = {}
    for l_opts in (cinder.OPTS, heat.OPTS, ironic.OPTS, magnum.OPTS,
                   manila.OPTS, mistral.OPTS, monasca.OPTS, murano.OPTS,
                   nova.OPTS, osclients.OPTS, profiler.OPTS, sahara.OPTS,
                   vm.OPTS, glance.OPTS, watcher.OPTS, tempest.OPTS,
                   keystone_roles.OPTS, keystone_users.OPTS, cleanup.OPTS,
                   senlin.OPTS, neutron.OPTS, octavia.OPTS,
                   osprofilerchart.OPTS):
        for category, category_opts in l_opts.items():
            # setdefault returns the (possibly freshly created) bucket, so a
            # single lookup both creates the category and extends it.
            opts.setdefault(category, []).extend(category_opts)
    # dict.items() already yields (key, value) pairs; no identity
    # comprehension needed.
    return list(opts.items())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,779
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/nova/test_utils.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally.common import cfg from rally import exceptions as rally_exceptions from rally_openstack.task.scenarios.nova import utils from tests.unit import fakes from tests.unit import test BM_UTILS = "rally.task.utils" NOVA_UTILS = "rally_openstack.task.scenarios.nova.utils" CONF = cfg.CONF @ddt.ddt class NovaScenarioTestCase(test.ScenarioTestCase): def setUp(self): super(NovaScenarioTestCase, self).setUp() self.server = mock.Mock() self.server1 = mock.Mock() self.volume = mock.Mock() self.floating_ip = mock.Mock() self.image = mock.Mock() self.context.update( {"user": {"id": "fake_user_id", "credential": mock.MagicMock()}, "tenant": {"id": "fake_tenant"}}) def _context_with_secgroup(self, secgroup): retval = {"user": {"secgroup": secgroup, "credential": mock.MagicMock()}} retval.update(self.context) return retval def test__list_servers(self): servers_list = [] self.clients("nova").servers.list.return_value = servers_list nova_scenario = utils.NovaScenario(self.context) return_servers_list = nova_scenario._list_servers(True) self.assertEqual(servers_list, return_servers_list) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_servers") def test__pick_random_nic(self): context = {"tenant": {"networks": [{"id": "net_id_1"}, {"id": "net_id_2"}]}, "iteration": 0} nova_scenario = utils.NovaScenario(context=context) nic1 = 
nova_scenario._pick_random_nic() self.assertEqual(nic1, [{"net-id": "net_id_1"}]) context["iteration"] = 1 nova_scenario = utils.NovaScenario(context=context) nic2 = nova_scenario._pick_random_nic() # balance to net 2 self.assertEqual(nic2, [{"net-id": "net_id_2"}]) context["iteration"] = 2 nova_scenario = utils.NovaScenario(context=context) nic3 = nova_scenario._pick_random_nic() # balance again, get net 1 self.assertEqual(nic3, [{"net-id": "net_id_1"}]) def test__get_network_id(self): networks = {"networks": [{"name": "foo1", "id": 1}, {"name": "foo2", "id": 2}]} self.clients("neutron").list_networks.return_value = networks scenario = utils.NovaScenario(self.context) self.assertEqual(1, scenario._get_network_id("foo1")) self.assertEqual(2, scenario._get_network_id("foo2")) self.clients("neutron").list_networks.assert_called_once_with() self.assertRaises(rally_exceptions.NotFoundException, scenario._get_network_id, "foo") @ddt.data( {}, {"kwargs": {"auto_assign_nic": True}}, {"kwargs": {"auto_assign_nic": True, "nics": [{"net-id": "baz_id"}]}}, {"context": {"user": {"secgroup": {"name": "test"}}}}, {"context": {"user": {"secgroup": {"name": "new8"}}}, "kwargs": {"security_groups": ["test8"]}}, {"context": {"user": {"secgroup": {"name": "test1"}}}, "kwargs": {"security_groups": ["test1"]}}, {"kwargs": {"auto_assign_nic": False, "nics": [{"net-name": "foo_name"}]}} ) @ddt.unpack def test__boot_server(self, context=None, kwargs=None): self.clients("nova").servers.create.return_value = self.server if context is None: context = self.context context.setdefault("user", {}).setdefault("credential", mock.MagicMock()) context.setdefault("config", {}) nova_scenario = utils.NovaScenario(context=context) nova_scenario.generate_random_name = mock.Mock() nova_scenario._pick_random_nic = mock.Mock( return_value=[{"net-id": "foo"}]) nova_scenario._get_network_id = mock.Mock(return_value="foo") if kwargs is None: kwargs = {} kwargs["fakearg"] = "fakearg" return_server = 
nova_scenario._boot_server("image_id", "flavor_id", **kwargs) self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_boot_poll_interval, timeout=CONF.openstack.nova_server_boot_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self.assertEqual(self.mock_wait_for_status.mock.return_value, return_server) expected_kwargs = {"fakearg": "fakearg"} if "nics" in kwargs: expected_kwargs["nics"] = kwargs["nics"] elif "auto_assign_nic" in kwargs: expected_kwargs["nics"] = (nova_scenario._pick_random_nic. return_value) expected_secgroups = set() if "security_groups" in kwargs: expected_secgroups.update(kwargs["security_groups"]) if "secgroup" in context["user"]: expected_secgroups.add(context["user"]["secgroup"]["name"]) if expected_secgroups: expected_kwargs["security_groups"] = list(expected_secgroups) self.clients("nova").servers.create.assert_called_once_with( nova_scenario.generate_random_name.return_value, "image_id", "flavor_id", **expected_kwargs) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.boot_server") def test__boot_server_with_network_exception(self): self.context.update({"tenant": {"networks": None}}) self.clients("nova").servers.create.return_value = self.server nova_scenario = utils.NovaScenario( context=self.context) self.assertRaises(TypeError, nova_scenario._boot_server, "image_id", "flavor_id", auto_assign_nic=True) def test__suspend_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._suspend_server(self.server) self.server.suspend.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["SUSPENDED"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_suspend_poll_interval, timeout=CONF.openstack.nova_server_suspend_timeout) 
self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.suspend_server") def test__resume_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._resume_server(self.server) self.server.resume.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_resume_poll_interval, timeout=CONF.openstack.nova_server_resume_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.resume_server") def test__pause_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._pause_server(self.server) self.server.pause.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["PAUSED"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_pause_poll_interval, timeout=CONF.openstack.nova_server_pause_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.pause_server") def test__unpause_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._unpause_server(self.server) self.server.unpause.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_unpause_poll_interval, timeout=CONF.openstack.nova_server_unpause_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.unpause_server") def test__shelve_server(self): nova_scenario = 
utils.NovaScenario(context=self.context) nova_scenario._shelve_server(self.server) self.server.shelve.assert_called_once_with() self.mock_wait_for_status.mock.assert_has_calls([ mock.call( self.server, ready_statuses=["SHELVED_OFFLOADED"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_shelve_poll_interval, timeout=CONF.openstack.nova_server_shelve_timeout ), mock.call( self.server, ready_statuses=["None"], status_attr="OS-EXT-STS:task_state", update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_shelve_poll_interval, timeout=CONF.openstack.nova_server_shelve_timeout)] ) self.assertEqual(2, self.mock_get_from_manager.mock.call_count) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.shelve_server") def test__unshelve_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._unshelve_server(self.server) self.server.unshelve.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_unshelve_poll_interval, timeout=CONF.openstack.nova_server_unshelve_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.unshelve_server") @mock.patch("rally_openstack.task.scenarios.nova.utils.image_service") def test__create_image(self, mock_image_service): glance = mock_image_service.Image.return_value glance.get_image.return_value = self.image nova_scenario = utils.NovaScenario(context=self.context) return_image = nova_scenario._create_image(self.server) self.mock_wait_for_status.mock.assert_has_calls([ mock.call( self.image, ready_statuses=["ACTIVE"], update_resource=glance.get_image, check_interval=CONF.openstack. 
nova_server_image_create_poll_interval, timeout=CONF.openstack.nova_server_image_create_timeout), mock.call( self.server, ready_statuses=["None"], status_attr="OS-EXT-STS:task_state", update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack. nova_server_image_create_poll_interval, timeout=CONF.openstack.nova_server_image_create_timeout) ]) self.assertEqual(self.mock_wait_for_status.mock.return_value, return_image) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.snapshot_server") def test__default_delete_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._delete_server(self.server) self.server.delete.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_delete_poll_interval, timeout=CONF.openstack.nova_server_delete_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_server") def test__force_delete_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._delete_server(self.server, force=True) self.server.force_delete.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_delete_poll_interval, timeout=CONF.openstack.nova_server_delete_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.force_delete_server") def test__reboot_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._reboot_server(self.server) self.server.reboot.assert_called_once_with(reboot_type="HARD") 
self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_reboot_poll_interval, timeout=CONF.openstack.nova_server_reboot_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.reboot_server") def test__soft_reboot_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._soft_reboot_server(self.server) self.server.reboot.assert_called_once_with(reboot_type="SOFT") self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_reboot_poll_interval, timeout=CONF.openstack.nova_server_reboot_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.soft_reboot_server") def test__rebuild_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._rebuild_server(self.server, "img", fakearg="fakearg") self.server.rebuild.assert_called_once_with("img", fakearg="fakearg") self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_rebuild_poll_interval, timeout=CONF.openstack.nova_server_rebuild_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.rebuild_server") def test__start_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._start_server(self.server) self.server.start.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], 
update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_start_poll_interval, timeout=CONF.openstack.nova_server_start_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.start_server") def test__stop_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._stop_server(self.server) self.server.stop.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["SHUTOFF"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_stop_poll_interval, timeout=CONF.openstack.nova_server_stop_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.stop_server") def test__rescue_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._rescue_server(self.server) self.server.rescue.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["RESCUE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_rescue_poll_interval, timeout=CONF.openstack.nova_server_rescue_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.rescue_server") def test__unrescue_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._unrescue_server(self.server) self.server.unrescue.assert_called_once_with() self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_unrescue_poll_interval, timeout=CONF.openstack.nova_server_unrescue_timeout) 
self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.unrescue_server") def _test_delete_servers(self, force=False): servers = [self.server, self.server1] nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._delete_servers(servers, force=force) check_interval = CONF.openstack.nova_server_delete_poll_interval expected = [] for server in servers: expected.append(mock.call( server, ready_statuses=["deleted"], check_deletion=True, update_resource=self.mock_get_from_manager.mock.return_value, check_interval=check_interval, timeout=CONF.openstack.nova_server_delete_timeout)) if force: server.force_delete.assert_called_once_with() self.assertFalse(server.delete.called) else: server.delete.assert_called_once_with() self.assertFalse(server.force_delete.called) self.mock_wait_for_status.mock.assert_has_calls(expected) timer_name = "nova.%sdelete_servers" % ("force_" if force else "") self._test_atomic_action_timer(nova_scenario.atomic_actions(), timer_name) def test__default_delete_servers(self): self._test_delete_servers() def test__force_delete_servers(self): self._test_delete_servers(force=True) @mock.patch("rally_openstack.task.scenarios.nova.utils.image_service") def test__delete_image(self, mock_image_service): glance = mock_image_service.Image.return_value nova_scenario = utils.NovaScenario(context=self.context, clients=mock.Mock()) nova_scenario._delete_image(self.image) glance.delete_image.assert_called_once_with(self.image.id) self.mock_wait_for_status.mock.assert_called_once_with( self.image, ready_statuses=["deleted", "pending_delete"], check_deletion=True, update_resource=glance.get_image, check_interval=CONF.openstack. 
nova_server_image_delete_poll_interval, timeout=CONF.openstack.nova_server_image_delete_timeout) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_image") @ddt.data( {"requests": 1}, {"requests": 25}, {"requests": 2, "instances_amount": 100, "auto_assign_nic": True, "fakearg": "fake"}, {"auto_assign_nic": True, "nics": [{"net-id": "foo"}]}, {"auto_assign_nic": False, "nics": [{"net-id": "foo"}]}, {"auto_assign_nic": False, "nics": [{"net-name": "foo_name"}]}) @ddt.unpack def test__boot_servers(self, image_id="image", flavor_id="flavor", requests=1, instances_amount=1, auto_assign_nic=False, **kwargs): servers = [mock.Mock() for i in range(instances_amount)] self.clients("nova").servers.list.return_value = servers scenario = utils.NovaScenario(context=self.context) scenario.generate_random_name = mock.Mock() scenario._pick_random_nic = mock.Mock( return_value=[{"net-id": "foo"}]) scenario._get_network_id = mock.Mock(return_value="foo") scenario._boot_servers(image_id, flavor_id, requests, instances_amount=instances_amount, auto_assign_nic=auto_assign_nic, **kwargs) expected_kwargs = dict(kwargs) if auto_assign_nic and "nics" not in kwargs: expected_kwargs["nics"] = scenario._pick_random_nic.return_value create_calls = [ mock.call( "%s_%d" % (scenario.generate_random_name.return_value, i), image_id, flavor_id, min_count=instances_amount, max_count=instances_amount, **expected_kwargs) for i in range(requests)] self.clients("nova").servers.create.assert_has_calls(create_calls) wait_for_status_calls = [ mock.call( servers[i], ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_boot_poll_interval, timeout=CONF.openstack.nova_server_boot_timeout) for i in range(instances_amount)] self.mock_wait_for_status.mock.assert_has_calls(wait_for_status_calls) self.mock_get_from_manager.mock.assert_has_calls( [mock.call() for i in range(instances_amount)]) 
self._test_atomic_action_timer(scenario.atomic_actions(), "nova.boot_servers") def test__show_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._show_server(self.server) self.clients("nova").servers.get.assert_called_once_with( self.server ) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.show_server") def test__get_console_server(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._get_server_console_output(self.server) self.clients( "nova").servers.get_console_output.assert_called_once_with( self.server, length=None) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_console_output_server") def test__get_console_url(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._get_console_url_server(self.server, "foo") self.clients( "nova").servers.get_console_url.assert_called_once_with( self.server, "foo") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_console_url_server") def test__associate_floating_ip(self): clients = mock.MagicMock() nova_scenario = utils.NovaScenario(context=self.context, clients=clients) neutronclient = clients.neutron.return_value neutronclient.list_ports.return_value = {"ports": [{"id": "p1"}, {"id": "p2"}]} fip_ip = "172.168.0.1" fip_id = "some" # case #1- an object from neutronclient floating_ip = {"floating_ip_address": fip_ip, "id": fip_id} nova_scenario._associate_floating_ip(self.server, floating_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": "p1"}} ) # case #2 - an object from network wrapper neutronclient.update_floatingip.reset_mock() floating_ip = {"ip": fip_ip, "id": fip_id} nova_scenario._associate_floating_ip(self.server, floating_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": "p1"}} ) # these should not be called in both cases self.assertFalse(neutronclient.list_floatingips.called) 
# it is an old behavior. let's check that it was not called self.assertFalse(self.server.add_floating_ip.called) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.associate_floating_ip", count=2) def test__associate_floating_ip_deprecated_behavior(self): clients = mock.MagicMock() nova_scenario = utils.NovaScenario(context=self.context, clients=clients) neutronclient = clients.neutron.return_value neutronclient.list_ports.return_value = {"ports": [{"id": "p1"}, {"id": "p2"}]} fip_id = "fip1" fip_ip = "172.168.0.1" neutronclient.list_floatingips.return_value = { "floatingips": [ {"id": fip_id, "floating_ip_address": fip_ip}, {"id": "fip2", "floating_ip_address": "127.0.0.1"}]} nova_scenario._associate_floating_ip(self.server, fip_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": "p1"}} ) neutronclient.list_floatingips.assert_called_once_with( floating_ip_address=fip_ip) # it is an old behavior. let's check that it was not called self.assertFalse(self.server.add_floating_ip.called) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.associate_floating_ip") def test__dissociate_floating_ip(self): clients = mock.MagicMock() nova_scenario = utils.NovaScenario(context=self.context, clients=clients) neutronclient = clients.neutron.return_value fip_ip = "172.168.0.1" fip_id = "some" # case #1- an object from neutronclient floating_ip = {"floating_ip_address": fip_ip, "id": fip_id} nova_scenario._dissociate_floating_ip(self.server, floating_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": None}} ) # case #2 - an object from network wrapper neutronclient.update_floatingip.reset_mock() floating_ip = {"ip": fip_ip, "id": fip_id} nova_scenario._dissociate_floating_ip(self.server, floating_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": None}} ) # these should not be called in both cases 
self.assertFalse(neutronclient.list_floatingips.called) # it is an old behavior. let's check that it was not called self.assertFalse(self.server.add_floating_ip.called) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.dissociate_floating_ip", count=2) def test__disassociate_floating_ip_deprecated_behavior(self): clients = mock.MagicMock() nova_scenario = utils.NovaScenario(context=self.context, clients=clients) neutronclient = clients.neutron.return_value fip_id = "fip1" fip_ip = "172.168.0.1" neutronclient.list_floatingips.return_value = { "floatingips": [ {"id": fip_id, "floating_ip_address": fip_ip}, {"id": "fip2", "floating_ip_address": "127.0.0.1"}]} nova_scenario._dissociate_floating_ip(self.server, fip_ip) neutronclient.update_floatingip.assert_called_once_with( fip_id, {"floatingip": {"port_id": None}} ) neutronclient.list_floatingips.assert_called_once_with( floating_ip_address=fip_ip) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.dissociate_floating_ip") def test__check_ip_address(self): nova_scenario = utils.NovaScenario(context=self.context) fake_server = fakes.FakeServerManager().create("test_server", "image_id_01", "flavor_id_01") fake_server.addresses = { "private": [ {"version": 4, "addr": "1.2.3.4"}, ]} floating_ip = fakes.FakeFloatingIP() floating_ip.ip = "10.20.30.40" # Also test function check_ip_address accept a string as attr self.assertFalse( nova_scenario.check_ip_address(floating_ip.ip)(fake_server)) self.assertTrue( nova_scenario.check_ip_address(floating_ip.ip, must_exist=False) (fake_server)) fake_server.addresses["private"].append( {"version": 4, "addr": floating_ip.ip} ) # Also test function check_ip_address accept an object with attr ip self.assertTrue( nova_scenario.check_ip_address(floating_ip) (fake_server)) self.assertFalse( nova_scenario.check_ip_address(floating_ip, must_exist=False) (fake_server)) def test__resize(self): nova_scenario = utils.NovaScenario(context=self.context) 
to_flavor = mock.Mock() nova_scenario._resize(self.server, to_flavor) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.resize") def test__resize_confirm(self): nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._resize_confirm(self.server) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.resize_confirm") @ddt.data({}, {"status": "SHUTOFF"}) @ddt.unpack def test__resize_revert(self, status=None): nova_scenario = utils.NovaScenario(context=self.context) if status is None: nova_scenario._resize_revert(self.server) status = "ACTIVE" else: nova_scenario._resize_revert(self.server, status=status) self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=[status], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack. nova_server_resize_revert_poll_interval, timeout=CONF.openstack.nova_server_resize_revert_timeout) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.resize_revert") @mock.patch("rally_openstack.common.services.storage.block.BlockStorage") def test__update_volume_resource(self, mock_block_storage): volume = fakes.FakeVolume(id=1) cinder = mock_block_storage.return_value cinder.get_volume = mock.MagicMock() nova_scenario = utils.NovaScenario(context=self.context) self.assertEqual(cinder.get_volume.return_value, nova_scenario._update_volume_resource(volume)) def test__attach_volume(self): expect_attach = mock.MagicMock() device = None (self.clients("nova").volumes.create_server_volume .return_value) = expect_attach nova_scenario = utils.NovaScenario(context=self.context) attach = nova_scenario._attach_volume(self.server, self.volume, device) (self.clients("nova").volumes.create_server_volume .assert_called_once_with(self.server.id, self.volume.id, device)) self.assertEqual(expect_attach, attach) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.attach_volume") def test__list_attachments(self): 
expect_attachments = [mock.MagicMock()] (self.clients("nova").volumes.get_server_volumes .return_value) = expect_attachments nova_scenario = utils.NovaScenario(context=self.context) list_attachments = nova_scenario._list_attachments(self.server.id) self.assertEqual(expect_attachments, list_attachments) (self.clients("nova").volumes.get_server_volumes .assert_called_once_with(self.server.id)) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_attachments") def test__detach_volume(self): attach = mock.MagicMock(id="attach_id") self.clients("nova").volumes.delete_server_volume.return_value = None nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._detach_volume(self.server, self.volume, attach) (self.clients("nova").volumes.delete_server_volume .assert_called_once_with(self.server.id, self.volume.id)) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.detach_volume") def test__detach_volume_no_attach(self): self.clients("nova").volumes.delete_server_volume.return_value = None nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._detach_volume(self.server, self.volume, None) (self.clients("nova").volumes.delete_server_volume .assert_called_once_with(self.server.id, self.volume.id)) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.detach_volume") def test__live_migrate_server(self): self.admin_clients("nova").servers.get(return_value=self.server) nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._live_migrate(self.server, block_migration=False, disk_over_commit=False, skip_compute_nodes_check=True, skip_host_check=True) self.mock_wait_for_status.mock.assert_called_once_with( self.server, ready_statuses=["ACTIVE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack. 
nova_server_live_migrate_poll_interval, timeout=CONF.openstack.nova_server_live_migrate_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.live_migrate") def test__migrate_server(self): fake_server = self.server setattr(fake_server, "OS-EXT-SRV-ATTR:host", "a1") self.clients("nova").servers.get(return_value=fake_server) nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._migrate(fake_server, skip_compute_nodes_check=True, skip_host_check=True) self.mock_wait_for_status.mock.assert_called_once_with( fake_server, ready_statuses=["VERIFY_RESIZE"], update_resource=self.mock_get_from_manager.mock.return_value, check_interval=CONF.openstack.nova_server_migrate_poll_interval, timeout=CONF.openstack.nova_server_migrate_timeout) self.mock_get_from_manager.mock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.migrate") self.assertRaises(rally_exceptions.RallyException, nova_scenario._migrate, fake_server, skip_compute_nodes_check=True, skip_host_check=False) self.assertRaises(rally_exceptions.RallyException, nova_scenario._migrate, fake_server, skip_compute_nodes_check=False, skip_host_check=True) def test__add_server_secgroups(self): server = mock.Mock() fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1")] nova_scenario = utils.NovaScenario() security_group = fake_secgroups[0] result = nova_scenario._add_server_secgroups(server, security_group.name) self.assertEqual( self.clients("nova").servers.add_security_group.return_value, result) (self.clients("nova").servers.add_security_group. 
assert_called_once_with(server, security_group.name)) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.add_server_secgroups") def test__list_keypairs(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_keypairs() self.assertEqual(self.clients("nova").keypairs.list.return_value, result) self.clients("nova").keypairs.list.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_keypairs") def test__create_keypair(self): nova_scenario = utils.NovaScenario() nova_scenario.generate_random_name = mock.Mock( return_value="rally_nova_keypair_fake") result = nova_scenario._create_keypair(fakeargs="fakeargs") self.assertEqual( self.clients("nova").keypairs.create.return_value.name, result) self.clients("nova").keypairs.create.assert_called_once_with( "rally_nova_keypair_fake", fakeargs="fakeargs") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.create_keypair") def test__get_server_group(self): nova_scenario = utils.NovaScenario() fakeid = 12345 result = nova_scenario._get_server_group(fakeid) self.assertEqual( self.clients("nova").server_groups.get.return_value, result) self.clients("nova").server_groups.get.assert_called_once_with( fakeid) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_server_group") def test__create_server_group(self): nova_scenario = utils.NovaScenario() nova_scenario.generate_random_name = mock.Mock( return_value="random_name") result = nova_scenario._create_server_group(fakeargs="fakeargs") self.assertEqual( self.clients("nova").server_groups.create.return_value, result) self.clients("nova").server_groups.create.assert_called_once_with( name="random_name", fakeargs="fakeargs") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.create_server_group") def test__delete_server_group(self): nova_scenario = utils.NovaScenario() fakeid = 12345 result = nova_scenario._delete_server_group(fakeid) self.assertEqual( 
self.clients("nova").server_groups.delete.return_value, result) self.clients("nova").server_groups.delete.assert_called_once_with( fakeid) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_server_group") def test__list_server_groups(self): nova_scenario = utils.NovaScenario() result1 = nova_scenario._list_server_groups(all_projects=False) result2 = nova_scenario._list_server_groups(all_projects=True) self.assertEqual(self.clients("nova").server_groups.list.return_value, result1) admcli = self.admin_clients("nova") self.assertEqual(admcli.server_groups.list.return_value, result2) self.clients("nova").server_groups.list.assert_called_once_with( False) self.admin_clients("nova").server_groups.list.assert_called_once_with( True) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_server_groups", count=2) def test__delete_keypair(self): nova_scenario = utils.NovaScenario() nova_scenario._delete_keypair("fake_keypair") self.clients("nova").keypairs.delete.assert_called_once_with( "fake_keypair") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_keypair") def test__get_keypair(self): nova_scenario = utils.NovaScenario() nova_scenario._get_keypair("fake_keypair") self.clients("nova").keypairs.get.assert_called_once_with( "fake_keypair") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_keypair") def test__list_hypervisors(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_hypervisors(detailed=False) self.assertEqual( self.admin_clients("nova").hypervisors.list.return_value, result) self.admin_clients("nova").hypervisors.list.assert_called_once_with( False) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_hypervisors") def test__statistics_hypervisors(self): nova_scenario = utils.NovaScenario() result = nova_scenario._statistics_hypervisors() self.assertEqual( self.admin_clients("nova").hypervisors.statistics.return_value, result) 
(self.admin_clients("nova").hypervisors.statistics. assert_called_once_with()) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.statistics_hypervisors") def test__get_hypervisor(self): hypervisor = mock.Mock() nova_scenario = utils.NovaScenario() result = nova_scenario._get_hypervisor(hypervisor) self.assertEqual( self.admin_clients("nova").hypervisors.get.return_value, result) self.admin_clients("nova").hypervisors.get.assert_called_once_with( hypervisor) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_hypervisor") def test__search_hypervisors(self): nova_scenario = utils.NovaScenario() nova_scenario._search_hypervisors("fake_hostname", servers=False) self.admin_clients("nova").hypervisors.search.assert_called_once_with( "fake_hostname", servers=False) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.search_hypervisors") def test__list_interfaces(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_interfaces("server") self.assertEqual( self.clients("nova").servers.interface_list.return_value, result) self.clients("nova").servers.interface_list.assert_called_once_with( "server") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_interfaces") def test__lock_server(self): server = mock.Mock() nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._lock_server(server) server.lock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.lock_server") def test__unlock_server(self): server = mock.Mock() nova_scenario = utils.NovaScenario(context=self.context) nova_scenario._unlock_server(server) server.unlock.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.unlock_server") def test__delete_network(self): nova_scenario = utils.NovaScenario() result = nova_scenario._delete_network("fake_net_id") self.assertEqual( 
self.admin_clients("nova").networks.delete.return_value, result) self.admin_clients("nova").networks.delete.assert_called_once_with( "fake_net_id") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_network") def test__list_flavors(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_flavors(detailed=True, fakearg="fakearg") self.assertEqual(self.clients("nova").flavors.list.return_value, result) self.clients("nova").flavors.list.assert_called_once_with( True, fakearg="fakearg") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_flavors") def test__set_flavor_keys(self): flavor = mock.MagicMock() nova_scenario = utils.NovaScenario() extra_specs = {"fakeargs": "foo"} flavor.set_keys = mock.MagicMock() result = nova_scenario._set_flavor_keys(flavor, extra_specs) self.assertEqual(flavor.set_keys.return_value, result) flavor.set_keys.assert_called_once_with(extra_specs) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.set_flavor_keys") @ddt.data({}, {"hypervisor": "foo_hypervisor"}) @ddt.unpack def test__list_agents(self, hypervisor=None): nova_scenario = utils.NovaScenario() result = nova_scenario._list_agents(hypervisor) self.assertEqual( self.admin_clients("nova").agents.list.return_value, result) self.admin_clients("nova").agents.list.assert_called_once_with( hypervisor) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_agents") def test__list_aggregates(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_aggregates() self.assertEqual( self.admin_clients("nova").aggregates.list.return_value, result) self.admin_clients("nova").aggregates.list.assert_called_once_with() self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_aggregates") def test__list_availability_zones(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_availability_zones(detailed=True) self.assertEqual( 
self.admin_clients("nova").availability_zones.list.return_value, result) avail_zones_client = self.admin_clients("nova").availability_zones avail_zones_client.list.assert_called_once_with(True) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_availability_zones") @ddt.data({}, {"host": "foo_host"}, {"binary": "foo_binary"}, {"host": "foo_host", "binary": "foo_binary"}) @ddt.unpack def test__list_services(self, host=None, binary=None): nova_scenario = utils.NovaScenario() result = nova_scenario._list_services(host=host, binary=binary) self.assertEqual(self.admin_clients("nova").services.list.return_value, result) self.admin_clients("nova").services.list.assert_called_once_with( host, binary) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_services") def test__list_flavor_access(self): nova_scenario = utils.NovaScenario() result = nova_scenario._list_flavor_access("foo_id") self.assertEqual( self.admin_clients("nova").flavor_access.list.return_value, result) self.admin_clients("nova").flavor_access.list.assert_called_once_with( flavor="foo_id") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.list_flavor_access") def test__add_tenant_access(self): tenant = mock.Mock() flavor = mock.Mock() nova_scenario = utils.NovaScenario() admin_clients = self.admin_clients("nova") result = nova_scenario._add_tenant_access(flavor.id, tenant.id) self.assertEqual( admin_clients.flavor_access.add_tenant_access.return_value, result) admin_clients.flavor_access.add_tenant_access.assert_called_once_with( flavor.id, tenant.id) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.add_tenant_access") def test__create_flavor(self): nova_scenario = utils.NovaScenario() random_name = "random_name" nova_scenario.generate_random_name = mock.Mock( return_value=random_name) result = nova_scenario._create_flavor(500, 1, 1, fakearg="fakearg") self.assertEqual( 
self.admin_clients("nova").flavors.create.return_value, result) self.admin_clients("nova").flavors.create.assert_called_once_with( random_name, 500, 1, 1, fakearg="fakearg") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.create_flavor") def test__get_flavor(self): nova_scenario = utils.NovaScenario() result = nova_scenario._get_flavor("foo_flavor_id") self.assertEqual( self.admin_clients("nova").flavors.get.return_value, result) self.admin_clients("nova").flavors.get.assert_called_once_with( "foo_flavor_id") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_flavor") def test__delete_flavor(self): nova_scenario = utils.NovaScenario() result = nova_scenario._delete_flavor("foo_flavor_id") self.assertEqual( self.admin_clients("nova").flavors.delete.return_value, result) self.admin_clients("nova").flavors.delete.assert_called_once_with( "foo_flavor_id") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_flavor") def test__update_server(self): server = mock.Mock() nova_scenario = utils.NovaScenario() nova_scenario.generate_random_name = mock.Mock( return_value="new_name") server.update = mock.Mock() result = nova_scenario._update_server(server) self.assertEqual(result, server.update.return_value) nova_scenario.generate_random_name.assert_called_once_with() server.update.assert_called_once_with(name="new_name") nova_scenario.generate_random_name.reset_mock() server.update.reset_mock() result = nova_scenario._update_server(server, description="desp") self.assertEqual(result, server.update.return_value) nova_scenario.generate_random_name.assert_called_once_with() server.update.assert_called_once_with(name="new_name", description="desp") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.update_server", count=2) def test_create_aggregate(self): nova_scenario = utils.NovaScenario(context=self.context) random_name = "random_name" nova_scenario.generate_random_name = mock.Mock( 
return_value=random_name) result = nova_scenario._create_aggregate("nova") self.assertEqual( self.admin_clients("nova").aggregates.create.return_value, result) self.admin_clients("nova").aggregates.create.assert_called_once_with( random_name, "nova") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.create_aggregate") def test_delete_aggregate(self): nova_scenario = utils.NovaScenario(context=self.context) result = nova_scenario._delete_aggregate("fake_aggregate") self.assertEqual( self.admin_clients("nova").aggregates.delete.return_value, result) self.admin_clients("nova").aggregates.delete.assert_called_once_with( "fake_aggregate") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.delete_aggregate") def test_get_aggregate_details(self): nova_scenario = utils.NovaScenario(context=self.context) result = nova_scenario._get_aggregate_details("fake_aggregate") self.assertEqual( self.admin_clients("nova").aggregates.get_details.return_value, result) self.admin_clients( "nova").aggregates.get_details.assert_called_once_with( "fake_aggregate") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.get_aggregate_details") def test_update_aggregate(self): aggregate = mock.Mock() nova_scenario = utils.NovaScenario(context=self.context) nova_scenario.generate_random_name = mock.Mock( return_value="random_name") values = {"name": "random_name", "availability_zone": "random_name"} result = nova_scenario._update_aggregate(aggregate=aggregate) self.assertEqual( self.admin_clients("nova").aggregates.update.return_value, result) self.admin_clients("nova").aggregates.update.assert_called_once_with( aggregate, values) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.update_aggregate") def test_aggregate_add_host(self): nova_scenario = utils.NovaScenario(context=self.context) result = nova_scenario._aggregate_add_host("fake_agg", "fake_host") self.assertEqual( 
self.admin_clients("nova").aggregates.add_host.return_value, result) self.admin_clients("nova").aggregates.add_host.assert_called_once_with( "fake_agg", "fake_host") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.aggregate_add_host") def test_aggregate_remove_host(self): nova_scenario = utils.NovaScenario(context=self.context) result = nova_scenario._aggregate_remove_host("fake_agg", "fake_host") self.assertEqual( self.admin_clients("nova").aggregates.remove_host.return_value, result) self.admin_clients( "nova").aggregates.remove_host.assert_called_once_with( "fake_agg", "fake_host") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.aggregate_remove_host") def test__uptime_hypervisor(self): nova_scenario = utils.NovaScenario() nova_scenario._uptime_hypervisor("fake_hostname") self.admin_clients("nova").hypervisors.uptime.assert_called_once_with( "fake_hostname") self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.uptime_hypervisor") def test__attach_interface(self): fake_server = mock.Mock() nova_scenario = utils.NovaScenario() result = nova_scenario._attach_interface(fake_server, net_id="id") self.assertEqual( self.clients("nova").servers.interface_attach.return_value, result) self.clients("nova").servers.interface_attach.assert_called_once_with( fake_server, None, "id", None) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.attach_interface") def test_aggregate_set_metadata(self): nova_scenario = utils.NovaScenario(context=self.context) fake_metadata = {"test_metadata": "true"} result = nova_scenario._aggregate_set_metadata("fake_aggregate", fake_metadata) self.assertEqual( self.admin_clients("nova").aggregates.set_metadata.return_value, result) self.admin_clients( "nova").aggregates.set_metadata.assert_called_once_with( "fake_aggregate", fake_metadata) self._test_atomic_action_timer(nova_scenario.atomic_actions(), "nova.aggregate_set_metadata")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,780
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/validators.py
# Copyright 2017: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import os import re import yaml from rally.common import logging from rally.common import validation from rally import exceptions from rally.plugins.common import validators from rally.task import types from rally_openstack.common import consts from rally_openstack.task.contexts.keystone import roles from rally_openstack.task.contexts.nova import flavors as flavors_ctx from rally_openstack.task import types as openstack_types LOG = logging.getLogger(__name__) @validation.configure("required_platform", platform="openstack") class RequiredOpenStackValidator(validation.RequiredPlatformValidator): def __init__(self, admin=False, users=False): """Validates credentials for OpenStack platform. 
This allows us to create 3 kind of tests cases: 1) requires platform with admin 2) requires platform with admin + users 3) requires platform with users :param admin: requires admin credential :param users: requires user credentials """ super(RequiredOpenStackValidator, self).__init__(platform="openstack") self.admin = admin self.users = users def validate(self, context, config, plugin_cls, plugin_cfg): if not (self.admin or self.users): self.fail("You should specify admin=True or users=True or both.") context = context["platforms"].get(self.platform, {}) if self.admin and context.get("admin") is None: self.fail("No admin credentials for openstack") if self.users and len(context.get("users", ())) == 0: if context.get("admin") is None: self.fail("No user credentials for openstack") else: # NOTE(andreykurilin): It is a case when the plugin requires # 'users' for launching, but there are no specified users in # deployment. Let's assume that 'users' context can create # them via admin user and do not fail." pass def with_roles_ctx(): """Add roles to users for validate """ def decorator(func): def wrapper(*args, **kw): func_type = inspect.getcallargs(func, *args, **kw) config = func_type.get("config", {}) context = func_type.get("context", {}) if config.get("contexts", {}).get("roles") \ and context.get("admin", {}): context["config"] = config["contexts"] rolegenerator = roles.RoleGenerator(context) with rolegenerator: rolegenerator.setup() func(*args, **kw) else: func(*args, **kw) return wrapper return decorator @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="image_exists", platform="openstack") class ImageExistsValidator(validation.Validator): def __init__(self, param_name, nullable): """Validator checks existed image or not :param param_name: defines which variable should be used to get image id value. 
:param nullable: defines image id param is required """ super(ImageExistsValidator, self).__init__() self.param_name = param_name self.nullable = nullable @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): from glanceclient import exc as glance_exc image_args = config.get("args", {}).get(self.param_name) if not image_args and self.nullable: return image_context = config.get("contexts", {}).get("images", {}) image_ctx_name = image_context.get("image_name") if not image_args: self.fail("Parameter %s is not specified." % self.param_name) if "image_name" in image_context: # NOTE(rvasilets) check string is "exactly equal to" a regex # or image name from context equal to image name from args if "regex" in image_args: match = re.match(image_args.get("regex"), image_ctx_name) if image_ctx_name == image_args.get("name") or ( "regex" in image_args and match): return try: for user in context["users"]: image_processor = openstack_types.GlanceImage( context={"admin": {"credential": user["credential"]}}) image_id = image_processor.pre_process(image_args, config={}) user["credential"].clients().glance().images.get(image_id) except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): self.fail("Image '%s' not found" % image_args) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="external_network_exists", platform="openstack") class ExternalNetworkExistsValidator(validation.Validator): def __init__(self, param_name): """Validator checks that external network with given name exists. 
:param param_name: name of validated network """ super(ExternalNetworkExistsValidator, self).__init__() self.param_name = param_name @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): ext_network = config.get("args", {}).get(self.param_name) if not ext_network: return result = [] for user in context["users"]: creds = user["credential"] networks = creds.clients().neutron().list_networks()["networks"] external_networks = [net["name"] for net in networks if net.get("router:external", False)] if ext_network not in external_networks: message = ("External (floating) network with name {1} " "not found by user {0}. " "Available networks: {2}").format(creds.username, ext_network, networks) result.append(message) if result: self.fail("\n".join(result)) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="required_neutron_extensions", platform="openstack") class RequiredNeutronExtensionsValidator(validation.Validator): def __init__(self, extensions, *args): """Validator checks if the specified Neutron extension is available :param extensions: list of Neutron extensions """ super(RequiredNeutronExtensionsValidator, self).__init__() if isinstance(extensions, (list, tuple)): # services argument is a list, so it is a new way of validators # usage, args in this case should not be provided self.req_ext = extensions if args: LOG.warning("Positional argument is not what " "'required_neutron_extensions' decorator expects. 
" "Use `extensions` argument instead") else: # it is old way validator self.req_ext = [extensions] self.req_ext.extend(args) @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): clients = context["users"][0]["credential"].clients() extensions = clients.neutron().list_extensions()["extensions"] aliases = [x["alias"] for x in extensions] for extension in self.req_ext: if extension not in aliases: self.fail("Neutron extension %s is not configured" % extension) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="flavor_exists", platform="openstack") class FlavorExistsValidator(validation.Validator): def __init__(self, param_name): """Returns validator for flavor :param param_name: defines which variable should be used to get flavor id value. """ super(FlavorExistsValidator, self).__init__() self.param_name = param_name def _get_flavor_from_context(self, config, flavor_value): if "flavors" not in config.get("contexts", {}): self.fail("No flavors context") flavors = [flavors_ctx.FlavorConfig(**f) for f in config["contexts"]["flavors"]] resource = types.obj_from_name(resource_config=flavor_value, resources=flavors, typename="flavor") flavor = flavors_ctx.FlavorConfig(**resource) flavor.id = "<context flavor: %s>" % flavor.name return flavor def _get_validated_flavor(self, config, clients, param_name): from novaclient import exceptions as nova_exc flavor_value = config.get("args", {}).get(param_name) if not flavor_value: self.fail("Parameter %s is not specified." 
% param_name) try: flavor_processor = openstack_types.Flavor( context={"admin": {"credential": clients.credential}}) flavor_id = flavor_processor.pre_process(flavor_value, config={}) flavor = clients.nova().flavors.get(flavor=flavor_id) return flavor except (nova_exc.NotFound, exceptions.InvalidScenarioArgument): try: return self._get_flavor_from_context(config, flavor_value) except validation.ValidationError: pass self.fail("Flavor '%s' not found" % flavor_value) @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): # flavors do not depend on user or tenant, so checking for one user # should be enough clients = context["users"][0]["credential"].clients() self._get_validated_flavor(config=config, clients=clients, param_name=self.param_name) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="image_valid_on_flavor", platform="openstack") class ImageValidOnFlavorValidator(FlavorExistsValidator): def __init__(self, flavor_param, image_param, fail_on_404_image=True, validate_disk=True): """Returns validator for image could be used for current flavor :param flavor_param: defines which variable should be used to get flavor id value. :param image_param: defines which variable should be used to get image id value. :param validate_disk: flag to indicate whether to validate flavor's disk. Should be True if instance is booted from image. Should be False if instance is booted from volume. Default value is True. :param fail_on_404_image: flag what indicate whether to validate image or not. 
""" super(ImageValidOnFlavorValidator, self).__init__(flavor_param) self.image_name = image_param self.fail_on_404_image = fail_on_404_image self.validate_disk = validate_disk def _get_validated_image(self, config, clients, param_name): from glanceclient import exc as glance_exc image_context = config.get("contexts", {}).get("images", {}) image_args = config.get("args", {}).get(param_name) image_ctx_name = image_context.get("image_name") if not image_args: self.fail("Parameter %s is not specified." % param_name) if "image_name" in image_context: # NOTE(rvasilets) check string is "exactly equal to" a regex # or image name from context equal to image name from args if "regex" in image_args: match = re.match(image_args.get("regex"), image_ctx_name) if image_ctx_name == image_args.get("name") or ("regex" in image_args and match): image = { "size": image_context.get("min_disk", 0), "min_ram": image_context.get("min_ram", 0), "min_disk": image_context.get("min_disk", 0) } return image try: image_processor = openstack_types.GlanceImage( context={"admin": {"credential": clients.credential}}) image_id = image_processor.pre_process(image_args, config={}) image = clients.glance().images.get(image_id) if hasattr(image, "to_dict"): # NOTE(stpierre): Glance v1 images are objects that can be # converted to dicts; Glance v2 images are already # dict-like image = image.to_dict() if not image.get("size"): image["size"] = 0 if not image.get("min_ram"): image["min_ram"] = 0 if not image.get("min_disk"): image["min_disk"] = 0 return image except (glance_exc.HTTPNotFound, exceptions.InvalidScenarioArgument): self.fail("Image '%s' not found" % image_args) @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): flavor = None for user in context["users"]: clients = user["credential"].clients() if not flavor: flavor = self._get_validated_flavor( config, clients, self.param_name) try: image = self._get_validated_image(config, clients, self.image_name) except 
validation.ValidationError: if not self.fail_on_404_image: return raise if flavor.ram < image["min_ram"]: self.fail("The memory size for flavor '%s' is too small " "for requested image '%s'." % (flavor.id, image["id"])) if flavor.disk and self.validate_disk: if flavor.disk * (1024 ** 3) < image["size"]: self.fail("The disk size for flavor '%s' is too small " "for requested image '%s'." % (flavor.id, image["id"])) if flavor.disk < image["min_disk"]: self.fail("The minimal disk size for flavor '%s' is " "too small for requested image '%s'." % (flavor.id, image["id"])) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="required_services", platform="openstack") class RequiredServicesValidator(validation.Validator): def __init__(self, services, *args): """Validator checks if specified OpenStack services are available. :param services: list with names of required services """ super(RequiredServicesValidator, self).__init__() if isinstance(services, (list, tuple)): # services argument is a list, so it is a new way of validators # usage, args in this case should not be provided self.services = services if args: LOG.warning("Positional argument is not what " "'required_services' decorator expects. 
" "Use `services` argument instead") else: # it is old way validator self.services = [services] self.services.extend(args) def validate(self, context, config, plugin_cls, plugin_cfg): if consts.Service.NOVA_NET in self.services: self.fail("We are sorry, but Nova-network was deprecated for a " "long time and latest novaclient doesn't support it, so " "we too.") creds = (context.get("admin", {}).get("credential", None) or context["users"][0]["credential"]) if "api_versions" in config.get("contexts", {}): api_versions = config["contexts"]["api_versions"] else: api_versions = config.get("contexts", {}).get( "api_versions@openstack", {}) available_services = creds.clients().services().values() for service in self.services: service_config = api_versions.get(service, {}) if ("service_type" in service_config or "service_name" in service_config): # NOTE(andreykurilin): validator should ignore services # configured via api_versions@openstack since the context # plugin itself should perform a proper validation continue if service not in available_services: self.fail( ("'{0}' service is not available. Hint: If '{0}' " "service has non-default service_type, try to setup " "it via 'api_versions@openstack' context." ).format(service)) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="validate_heat_template", platform="openstack") class ValidateHeatTemplateValidator(validation.Validator): def __init__(self, params, *args): """Validates heat template. :param params: list of parameters to be validated. """ super(ValidateHeatTemplateValidator, self).__init__() if isinstance(params, (list, tuple)): # services argument is a list, so it is a new way of validators # usage, args in this case should not be provided self.params = params if args: LOG.warning("Positional argument is not what " "'validate_heat_template' decorator expects. 
" "Use `params` argument instead") else: # it is old way validator self.params = [params] self.params.extend(args) @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): for param_name in self.params: template_path = config.get("args", {}).get(param_name) if not template_path: msg = ("Path to heat template is not specified. Its needed " "for heat template validation. Please check the " "content of `{}` scenario argument.") return self.fail(msg.format(param_name)) template_path = os.path.expanduser(template_path) if not os.path.exists(template_path): self.fail("No file found by the given path %s" % template_path) with open(template_path, "r") as f: try: for user in context["users"]: clients = user["credential"].clients() clients.heat().stacks.validate(template=f.read()) except Exception as e: self.fail("Heat template validation failed on %(path)s. " "Original error message: %(msg)s." % {"path": template_path, "msg": str(e)}) @validation.add("required_platform", platform="openstack", admin=True) @validation.configure(name="required_cinder_services", platform="openstack") class RequiredCinderServicesValidator(validation.Validator): def __init__(self, services): """Validator checks that specified Cinder service is available. 
It uses Cinder client with admin permissions to call 'cinder service-list' call :param services: Cinder service name """ super(RequiredCinderServicesValidator, self).__init__() self.services = services @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): clients = context["admin"]["credential"].clients() for service in clients.cinder().services.list(): if (service.binary == str(self.services) and service.state == str("up")): return self.fail("%s service is not available" % self.services) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="required_api_versions", platform="openstack") class RequiredAPIVersionsValidator(validation.Validator): def __init__(self, component, versions): """Validator checks component API versions. :param component: name of required component :param versions: version of required component """ super(RequiredAPIVersionsValidator, self).__init__() self.component = component self.versions = versions def validate(self, context, config, plugin_cls, plugin_cfg): versions = [str(v) for v in self.versions] versions_str = ", ".join(versions) msg = ("Task was designed to be used with %(component)s " "V%(version)s, but V%(found_version)s is " "selected.") for user in context["users"]: clients = user["credential"].clients() if self.component == "keystone": if "2.0" not in versions and hasattr( clients.keystone(), "tenants"): self.fail(msg % {"component": self.component, "version": versions_str, "found_version": "2.0"}) if "3" not in versions and hasattr( clients.keystone(), "projects"): self.fail(msg % {"component": self.component, "version": versions_str, "found_version": "3"}) else: av_ctx = config.get("contexts", {}).get( "api_versions@openstack", {}) default_version = getattr(clients, self.component).choose_version() used_version = av_ctx.get(self.component, {}).get( "version", default_version) if not used_version: self.fail("Unable to determine the API version.") if 
str(used_version) not in versions: self.fail(msg % {"component": self.component, "version": versions_str, "found_version": used_version}) @validation.add("required_platform", platform="openstack", users=True) @validation.configure(name="volume_type_exists", platform="openstack") class VolumeTypeExistsValidator(validation.Validator): def __init__(self, param_name, nullable=True): """Returns validator for volume types. :param param_name: defines variable to be used as the flag to determine if volume types should be checked for existence. :param nullable: defines volume_type param is required """ super(VolumeTypeExistsValidator, self).__init__() self.param = param_name self.nullable = nullable @with_roles_ctx() def validate(self, context, config, plugin_cls, plugin_cfg): volume_type = config.get("args", {}).get(self.param, False) if not volume_type: if self.nullable: return self.fail("The parameter '%s' is required and should not be empty." % self.param) for user in context["users"]: clients = user["credential"].clients() vt_names = [vt.name for vt in clients.cinder().volume_types.list()] ctx = config.get("contexts", {}).get("volume_types", []) vt_names += ctx if volume_type not in vt_names: self.fail("Specified volume type %s not found for user %s." 
" List of available types: %s" % (volume_type, user, vt_names)) @validation.configure(name="workbook_contains_workflow", platform="openstack") class WorkbookContainsWorkflowValidator(validators.FileExistsValidator): def __init__(self, workbook_param, workflow_param): """Validate that workflow exist in workbook when workflow is passed :param workbook_param: parameter containing the workbook definition :param workflow_param: parameter containing the workflow name """ super(WorkbookContainsWorkflowValidator, self).__init__(workflow_param) self.workbook = workbook_param self.workflow = workflow_param def validate(self, context, config, plugin_cls, plugin_cfg): wf_name = config.get("args", {}).get(self.workflow) if wf_name: wb_path = config.get("args", {}).get(self.workbook) wb_path = os.path.expanduser(wb_path) self._file_access_ok(wb_path, mode=os.R_OK, param_name=self.workbook) with open(wb_path, "r") as wb_def: wb_def = yaml.safe_load(wb_def) if wf_name not in wb_def["workflows"]: self.fail("workflow '%s' not found in the definition '%s'" % (wf_name, wb_def)) @validation.configure(name="required_context_config", platform="openstack") class RequiredContextConfigValidator(validation.Validator): def __init__(self, context_name, context_config): """Validate that context is configured according to requirements. :param context_name: string efining context name :param context_config: dictionary of required key/value pairs """ super(RequiredContextConfigValidator, self).__init__() self.context_name = context_name self.context_config = context_config def validate(self, context, config, plugin_cls, plugin_cfg): if self.context_name not in config.get("contexts", {}): # fail silently. 
if it is required context, # `required_contexts` validator should raise proper error return ctx_config = config["contexts"].get(self.context_name) for key, value in self.context_config.items(): if key not in ctx_config or ctx_config[key] != value: self.fail( f"The '{self.context_name}' context " f"expects '{self.context_config}'")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,781
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/dataplane/test_heat.py
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
from unittest import mock

from rally_openstack.task.contexts.dataplane import heat as heat_dataplane
from tests.unit import test

# Prefix used to build mock.patch targets inside the module under test.
MOD = "rally_openstack.task.contexts.dataplane.heat."


class TestHeatWorkload(test.ScenarioTestCase):
    """Unit tests for the heat_dataplane context plugin."""

    @mock.patch(MOD + "pkgutil")
    def test_get_data_resource(self, mock_pkgutil):
        """get_data with a 2-element sequence reads a package resource."""
        mock_pkgutil.get_data.return_value = "fake_data"
        data = heat_dataplane.get_data([1, 2])
        self.assertEqual("fake_data", data)
        # The two elements are unpacked into pkgutil.get_data(package, name).
        mock_pkgutil.get_data.assert_called_once_with(1, 2)

    @mock.patch(MOD + "open")
    def test_get_data_file(self, mock_open):
        """get_data with a plain value opens and reads it as a file path."""
        data = heat_dataplane.get_data(1)
        self.assertEqual(mock_open.return_value.read.return_value, data)
        mock_open.assert_called_once_with(1)

    def test__get_context_parameter(self):
        """Dotted paths resolve into user/tenant structures by index/key."""
        user = [1, 2]
        tenant = [3, 4, {"one": 1}]
        self.context["tenants"] = {1: tenant}
        ctx = heat_dataplane.HeatDataplane(self.context)
        # Bind the user and tenant-id arguments once; vary only the path.
        gcp = functools.partial(ctx._get_context_parameter, user, 1)
        self.assertEqual(1, gcp("user.0"))
        self.assertEqual(2, gcp("user.1"))
        self.assertEqual(3, gcp("tenant.0"))
        self.assertEqual(1, gcp("tenant.2.one"))

    @mock.patch(MOD + "osclients.Clients")
    def test__get_public_network_id(self, mock_clients):
        """The first network returned by neutron supplies the public net id."""
        fake_net = {"id": "fake_id"}
        fake_nc = mock.Mock(name="fake_neutronclient")
        fake_nc.list_networks.return_value = {"networks": [fake_net]}
        mock_clients.neutron.return_value = fake_nc
        # Clients(credential).neutron() must return our fake neutron client.
        mock_clients.return_value = mock.Mock(
            neutron=mock.Mock(return_value=fake_nc))
        self.context["admin"] = {"credential": "fake_credential"}
        ctx = heat_dataplane.HeatDataplane(self.context)
        network_id = ctx._get_public_network_id()
        self.assertEqual("fake_id", network_id)
        mock_clients.assert_called_once_with("fake_credential")

    @mock.patch(MOD + "get_data")
    @mock.patch(MOD + "HeatDataplane._get_context_parameter")
    @mock.patch(MOD + "heat_utils")
    def test_setup(self, mock_heat_utils,
                   mock_heat_dataplane__get_context_parameter,
                   mock_get_data):
        """setup() stores one (stack_id, template, files, params) workload."""
        self.context.update({
            "config": {
                "heat_dataplane": {
                    "stacks_per_tenant": 1,
                    "template": "tpl.yaml",
                    "files": {"file1": "f1.yaml", "file2": "f2.yaml"},
                    "parameters": {"key": "value"},
                    "context_parameters": {"ctx.key": "ctx.value"},
                }
            },
            "users": [{"tenant_id": "t1", "keypair": {"name": "kp1"}}, ],
            "tenants": {"t1": {"networks": [{"router_id": "rid"}]}},
        })
        mock_heat_dataplane__get_context_parameter.return_value = "gcp"
        # First get_data call loads the template, next two load the files.
        mock_get_data.side_effect = ["tpl", "sf1", "sf2"]
        ctx = heat_dataplane.HeatDataplane(self.context)
        ctx._get_public_network_id = mock.Mock(return_value="fake_net")
        ctx.setup()
        workloads = self.context["tenants"]["t1"]["stack_dataplane"]
        self.assertEqual(1, len(workloads))
        wl = workloads[0]
        fake_scenario = mock_heat_utils.HeatScenario.return_value
        self.assertEqual(fake_scenario._create_stack.return_value.id, wl[0])
        self.assertEqual("tpl", wl[1])
        self.assertIn("sf1", wl[2].values())
        self.assertIn("sf2", wl[2].values())
        # Stack parameters merge config parameters, resolved context
        # parameters, the keypair name, the public network and router ids.
        expected = {
            "ctx.key": "gcp",
            "key": "value",
            "key_name": "kp1",
            "network_id": "fake_net",
            "router_id": "rid"}
        self.assertEqual(expected, wl[3])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,782
openstack/rally-openstack
refs/heads/master
/tests/unit/common/test_credential.py
# Copyright 2017: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.common import credential
from tests.unit import test


class OpenStackCredentialTestCase(test.TestCase):
    """Unit tests for credential.OpenStackCredential."""

    def setUp(self):
        super(OpenStackCredentialTestCase, self).setUp()
        # A minimal credential: only auth url, user, password and tenant
        # are set; every other field should fall back to its default.
        self.credential = credential.OpenStackCredential(
            "foo_url", "foo_user", "foo_password",
            tenant_name="foo_tenant")

    def test_to_dict(self):
        """to_dict() exposes explicit values and defaulted fields."""
        expected = {
            "auth_url": "foo_url",
            "username": "foo_user",
            "password": "foo_password",
            "tenant_name": "foo_tenant",
            "region_name": None,
            "domain_name": None,
            "permission": None,
            "endpoint": None,
            "endpoint_type": None,
            "https_insecure": False,
            "https_cacert": None,
            "https_cert": None,
            "project_domain_name": None,
            "user_domain_name": None,
            "profiler_hmac_key": None,
            "profiler_conn_str": None,
            "api_info": {},
        }
        self.assertEqual(expected, self.credential.to_dict())

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_clients(self, mock_clients):
        """clients() constructs an osclients.Clients bound to the credential."""
        returned = self.credential.clients()
        mock_clients.assert_called_once_with(self.credential, cache={})
        self.assertIs(mock_clients.return_value, returned)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,783
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/heat/stacks.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.heat import utils as heat_utils


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="stacks", platform="openstack", order=435)
class StackGenerator(context.OpenStackContext):
    """Context class for create temporary stacks with resources.

       Stack generator allows to generate arbitrary number of stacks for
       each tenant before test scenarios. In addition, it allows to define
       number of resources (namely OS::Heat::RandomString) that will be created
       inside each stack. After test execution the stacks will be
       automatically removed from heat.
    """

    # JSON schema restricting the context configuration to two positive
    # integer knobs.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "stacks_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "resources_per_stack": {
                "type": "integer",
                "minimum": 1
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "stacks_per_tenant": 2,
        "resources_per_stack": 10
    }

    @staticmethod
    def _prepare_stack_template(res_num):
        """Build a HOT template holding ``res_num`` RandomString resources."""
        # All resources intentionally reference the same definition dict.
        resource_def = {"type": "OS::Heat::RandomString"}
        hot = {
            "heat_template_version": "2014-10-16",
            "description": "Test template for rally",
            "resources": {}
        }
        for idx in range(res_num):
            hot["resources"]["TestResource%d" % idx] = resource_def
        return hot

    def setup(self):
        """Create the configured stacks for every tenant in the context."""
        stack_template = self._prepare_stack_template(
            self.config["resources_per_stack"])
        for user, tenant_id in self._iterate_per_tenants():
            scenario = heat_utils.HeatScenario({
                "user": user,
                "task": self.context["task"],
                "owner_id": self.context["owner_id"],
            })
            # Record each created stack id on the tenant as we go, so that
            # partially-created stacks remain visible for cleanup.
            stack_ids = self.context["tenants"][tenant_id]["stacks"] = []
            for _ in range(self.config["stacks_per_tenant"]):
                stack_ids.append(scenario._create_stack(stack_template).id)

    def cleanup(self):
        """Remove every stack created by this context."""
        resource_manager.cleanup(
            names=["heat.stacks"],
            users=self.context.get("users", []),
            superclass=heat_utils.HeatScenario,
            task_id=self.get_owner_id())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,784
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/cinder/test_volumes.py
# Copyright 2013 Huawei Technologies Co.,LTD. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally_openstack.task.scenarios.cinder import volumes from tests.unit import test CINDER_VOLUMES = ("rally_openstack.task.scenarios.cinder.volumes") @ddt.ddt class CinderServersTestCase(test.ScenarioTestCase): def _get_context(self): context = test.get_test_context() context.update({ "admin": { "id": "fake_user_id", "credential": mock.MagicMock() }, "user": {"id": "fake_user_id", "credential": mock.MagicMock()}, "tenant": {"id": "fake", "name": "fake", "volumes": [{"id": "uuid", "size": 1}], "servers": [1]}}) return context def setUp(self): super(CinderServersTestCase, self).setUp() patch = mock.patch( "rally_openstack.common.services.storage.block.BlockStorage") self.addCleanup(patch.stop) self.mock_cinder = patch.start() def test_create_and_list_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndListVolume(self._get_context()) scenario.run(1, True, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") mock_service.list_volumes.assert_called_once_with(True) def test_create_and_get_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndGetVolume(self._get_context()) scenario.run(1, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") mock_service.get_volume.assert_called_once_with( 
mock_service.create_volume.return_value.id) def test_list_volumes(self): mock_service = self.mock_cinder.return_value scenario = volumes.ListVolumes(self._get_context()) scenario.run(True) mock_service.list_volumes.assert_called_once_with( True, limit=None, marker=None, search_opts=None, sort=None) def test_list_types(self): mock_service = self.mock_cinder.return_value scenario = volumes.ListTypes(self._get_context()) scenario.run(None, is_public=None) mock_service.list_types.assert_called_once_with(None, is_public=None) def test_list_transfers(self): mock_service = self.mock_cinder.return_value scenario = volumes.ListTransfers(self._get_context()) scenario._list_transfers = mock.MagicMock() scenario.run(True, search_opts=None) mock_service.list_transfers.assert_called_once_with( True, search_opts=None) @ddt.data({"update_args": {"description": "desp"}, "expected": {"description": "desp"}}, {"update_args": {"update_name": True, "description": "desp"}, "expected": {"name": "new_name", "description": "desp"}}) @ddt.unpack def test_create_and_update_volume(self, update_args, expected): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndUpdateVolume(self._get_context()) scenario.generate_random_name = mock.MagicMock() scenario.generate_random_name.return_value = "new_name" scenario.run(1, update_volume_kwargs=update_args) mock_service.create_volume.assert_called_once_with(1) mock_service.update_volume.assert_called_once_with( mock_service.create_volume.return_value, **expected) if update_args.get("update_name", False): scenario.generate_random_name.assert_called_once_with() def test_create_volume_and_update_readonly_flag(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateVolumeAndUpdateReadonlyFlag( self._get_context()) scenario.run(1, image=None, read_only=True, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") mock_service.update_readonly_flag.assert_called_once_with( 
mock_service.create_volume.return_value.id, read_only=True) def test_create_and_delete_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndDeleteVolume(self._get_context()) scenario.sleep_between = mock.MagicMock() scenario.run(size=1, min_sleep=10, max_sleep=20, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") scenario.sleep_between.assert_called_once_with(10, 20) mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) def test_create_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateVolume(self._get_context()) scenario.run(1, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") def test_create_volume_and_modify_metadata(self): mock_service = self.mock_cinder.return_value scenario = volumes.ModifyVolumeMetadata(self._get_context()) scenario.run(sets=5, set_size=4, deletes=3, delete_size=2) mock_service.set_metadata.assert_called_once_with( "uuid", set_size=4, sets=5) mock_service.delete_metadata.assert_called_once_with( "uuid", keys=mock_service.set_metadata.return_value, deletes=3, delete_size=2) def test_create_and_extend_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndExtendVolume(self._get_context()) scenario.sleep_between = mock.MagicMock() scenario.run(1, 2, 10, 20, fakearg="f") mock_service.create_volume.assert_called_once_with(1, fakearg="f") mock_service.extend_volume.assert_called_once_with( mock_service.create_volume.return_value, new_size=2) scenario.sleep_between.assert_called_once_with(10, 20) mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) def test_create_from_image_and_delete_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndDeleteVolume(self._get_context()) scenario.run(1, image="fake_image") mock_service.create_volume.assert_called_once_with( 1, 
imageRef="fake_image") mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) def test_create_volume_from_image(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateVolume(self._get_context()) scenario.run(1, image="fake_image") mock_service.create_volume.assert_called_once_with( 1, imageRef="fake_image") def test_create_volume_from_image_and_list(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndListVolume(self._get_context()) scenario.run(1, True, "fake_image") mock_service.create_volume.assert_called_once_with( 1, imageRef="fake_image") mock_service.list_volumes.assert_called_once_with(True) def test_create_from_volume_and_delete_volume(self): mock_service = self.mock_cinder.return_value vol_size = 1 scenario = volumes.CreateFromVolumeAndDeleteVolume(self._get_context()) scenario.run(vol_size) mock_service.create_volume.assert_called_once_with( 1, source_volid="uuid") mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) @mock.patch("%s.CreateAndDeleteSnapshot.sleep_between" % CINDER_VOLUMES) def test_create_and_delete_snapshot(self, mock_sleep_between): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndDeleteSnapshot(self._get_context()) scenario.run(False, 10, 20, fakearg="f") mock_service.create_snapshot.assert_called_once_with("uuid", force=False, fakearg="f") mock_sleep_between.assert_called_once_with(10, 20) mock_service.delete_snapshot.assert_called_once_with( mock_service.create_snapshot.return_value) def test_create_and_list_snapshots(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndListSnapshots(self._get_context()) scenario.run(False, True, fakearg="f") mock_service.create_snapshot.assert_called_once_with("uuid", force=False, fakearg="f") mock_service.list_snapshots.assert_called_once_with(True) def test_create_and_attach_volume(self): fake_server = mock.MagicMock() 
mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndAttachVolume(self._get_context()) scenario._boot_server = mock.MagicMock(return_value=fake_server) scenario._delete_server = mock.MagicMock() scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() volume_args = {"some_key": "some_val"} vm_args = {"some_key": "some_val"} scenario.run(10, "img", "0", create_volume_params=volume_args, create_vm_params=vm_args) mock_service.create_volume.assert_called_once_with( 10, **volume_args) scenario._attach_volume.assert_called_once_with( fake_server, mock_service.create_volume.return_value) scenario._detach_volume.assert_called_once_with( fake_server, mock_service.create_volume.return_value) mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) scenario._delete_server.assert_called_once_with(fake_server) @mock.patch("rally_openstack.common.services.image.image.Image") def test_create_and_upload_volume_to_image(self, mock_image): mock_volume_service = self.mock_cinder.return_value mock_image_service = mock_image.return_value scenario = volumes.CreateAndUploadVolumeToImage(self._get_context()) scenario.run(2, image="img", container_format="fake", disk_format="disk", do_delete=False, fakeargs="fakeargs") mock_volume_service.create_volume.assert_called_once_with( 2, imageRef="img", fakeargs="fakeargs") mock_volume_service.upload_volume_to_image.assert_called_once_with( mock_volume_service.create_volume.return_value, container_format="fake", disk_format="disk", force=False) mock_volume_service.create_volume.reset_mock() mock_volume_service.upload_volume_to_image.reset_mock() scenario.run(1, image=None, do_delete=True, fakeargs="fakeargs") mock_volume_service.create_volume.assert_called_once_with( 1, fakeargs="fakeargs") mock_volume_service.upload_volume_to_image.assert_called_once_with( mock_volume_service.create_volume.return_value, container_format="bare", disk_format="raw", force=False) 
mock_volume_service.delete_volume.assert_called_once_with( mock_volume_service.create_volume.return_value) mock_image_service.delete_image.assert_called_once_with( mock_volume_service.upload_volume_to_image.return_value.id) def test_create_snapshot_and_attach_volume(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context()) scenario._boot_server = mock.MagicMock() scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.run("img", "flavor") self.assertTrue(mock_service.create_volume.called) volume = mock_service.create_volume.return_value snapshot = mock_service.create_snapshot.return_value mock_service.create_snapshot.assert_called_once_with(volume.id, force=False) mock_service.delete_snapshot.assert_called_once_with(snapshot) scenario._attach_volume.assert_called_once_with( scenario._boot_server.return_value, volume) scenario._detach_volume.assert_called_once_with( scenario._boot_server.return_value, volume) mock_service.delete_volume.assert_called_once_with(volume) @mock.patch("random.choice") def test_create_snapshot_and_attach_volume_use_volume_type_with_name( self, mock_choice): mock_service = self.mock_cinder.return_value scenario = volumes.CreateSnapshotAndAttachVolume(self._get_context()) scenario._boot_server = mock.MagicMock() scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.run("img", "flavor", volume_type="type") fake_volume = mock_service.create_volume.return_value fake_server = scenario._boot_server.return_value fake_snapshot = mock_service.create_snapshot.return_value mock_service.create_volume.assert_called_once_with( {"min": 1, "max": 5}, volume_type="type") mock_service.create_snapshot.assert_called_once_with(fake_volume.id, force=False) mock_service.delete_snapshot.assert_called_once_with(fake_snapshot) scenario._attach_volume.assert_called_once_with(fake_server, fake_volume) 
scenario._detach_volume.assert_called_once_with(fake_server, fake_volume) mock_service.delete_volume.assert_called_once_with(fake_volume) @mock.patch("random.randint") def test_create_nested_snapshots_and_attach_volume(self, mock_randint): mock_service = self.mock_cinder.return_value mock_randint.return_value = 2 volume_kwargs = {"volume_type": "type1"} snapshot_kwargs = {"name": "snapshot1", "description": "snaphot one"} scenario = volumes.CreateNestedSnapshotsAndAttachVolume( context=self._get_context()) scenario._boot_server = mock.MagicMock() scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.run("img", "flavor", create_volume_kwargs=volume_kwargs, create_snapshot_kwargs=snapshot_kwargs) mock_service.create_volume.assert_called_once_with( mock_randint.return_value, **volume_kwargs) mock_service.create_snapshot.assert_called_once_with( mock_service.create_volume.return_value.id, force=False, **snapshot_kwargs) scenario._attach_volume(scenario._boot_server.return_value, mock_service.create_volume.return_value) mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) mock_service.delete_snapshot.assert_called_once_with( mock_service.create_snapshot.return_value) scenario._detach_volume.assert_called_once_with( scenario._boot_server.return_value, mock_service.create_volume.return_value) @mock.patch("random.randint") def test_create_nested_snapshots_and_attach_volume_2(self, mock_randint): mock_service = self.mock_cinder.return_value mock_randint.return_value = 2 nested_level = 3 volume_size = mock_randint.return_value fake_volumes = [mock.Mock(size=volume_size) for i in range(nested_level)] fake_snapshots = [mock.Mock() for i in range(nested_level)] mock_service.create_volume.side_effect = fake_volumes mock_service.create_snapshot.side_effect = fake_snapshots scenario = volumes.CreateNestedSnapshotsAndAttachVolume( context=self._get_context()) scenario._boot_server = mock.MagicMock() 
scenario._attach_volume = mock.MagicMock() scenario._detach_volume = mock.MagicMock() scenario.run("img", "flavor", nested_level=nested_level) expected_volumes = [mock.call(volume_size)] expected_snapshots = [mock.call(fake_volumes[0].id, force=False)] expected_attachs = [mock.call(scenario._boot_server.return_value, fake_volumes[0])] for i in range(nested_level - 1): expected_volumes.append( mock.call(volume_size, snapshot_id=fake_snapshots[i].id)) expected_snapshots.append( mock.call(fake_volumes[i + 1].id, force=False)) expected_attachs.append( mock.call(scenario._boot_server.return_value, fake_volumes[i + 1])) mock_service.create_volume.assert_has_calls(expected_volumes) mock_service.create_snapshot.assert_has_calls(expected_snapshots) scenario._attach_volume.assert_has_calls(expected_attachs) fake_volumes.reverse() fake_snapshots.reverse() mock_service.delete_volume.assert_has_calls( [mock.call(volume) for volume in fake_volumes]) mock_service.delete_snapshot.assert_has_calls( [mock.call(snapshot) for snapshot in fake_snapshots]) scenario._detach_volume.assert_has_calls( [mock.call(scenario._boot_server.return_value, fake_volumes[i]) for i in range(len(fake_volumes))]) def test_create_volume_backup(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateVolumeBackup(self._get_context()) volume_kwargs = {"some_var": "zaq"} scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs) mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with( mock_service.create_volume.return_value.id) mock_service.delete_volume.assert_called_once_with( mock_service.create_volume.return_value) mock_service.delete_backup.assert_called_once_with( mock_service.create_backup.return_value) def test_create_volume_backup_no_delete(self): mock_service = self.mock_cinder.return_value scenario = volumes.CreateVolumeBackup(self._get_context()) volume_kwargs = {"some_var": "zaq"} scenario.run(1, 
do_delete=False, create_volume_kwargs=volume_kwargs) mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with( mock_service.create_volume.return_value.id) self.assertFalse(mock_service.delete_volume.called) self.assertFalse(mock_service.delete_backup.called) def test_create_and_restore_volume_backup(self): mock_service = self.mock_cinder.return_value volume_kwargs = {"some_var": "zaq"} scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context()) scenario.run(1, do_delete=True, create_volume_kwargs=volume_kwargs) fake_volume = mock_service.create_volume.return_value fake_backup = mock_service.create_backup.return_value mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with(fake_volume.id) mock_service.restore_backup.assert_called_once_with(fake_backup.id) mock_service.delete_volume.assert_called_once_with(fake_volume) mock_service.delete_backup.assert_called_once_with(fake_backup) def test_create_and_restore_volume_backup_no_delete(self): mock_service = self.mock_cinder.return_value volume_kwargs = {"some_var": "zaq"} scenario = volumes.CreateAndRestoreVolumeBackup(self._get_context()) scenario.run(1, do_delete=False, create_volume_kwargs=volume_kwargs) fake_volume = mock_service.create_volume.return_value fake_backup = mock_service.create_backup.return_value mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with(fake_volume.id) mock_service.restore_backup.assert_called_once_with(fake_backup.id) self.assertFalse(mock_service.delete_volume.called) self.assertFalse(mock_service.delete_backup.called) def test_create_and_list_volume_backups(self): mock_service = self.mock_cinder.return_value volume_kwargs = {"some_var": "zaq"} scenario = volumes.CreateAndListVolumeBackups(self._get_context()) scenario.run(1, detailed=True, do_delete=True, 
create_volume_kwargs=volume_kwargs) fake_volume = mock_service.create_volume.return_value fake_backup = mock_service.create_backup.return_value mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with(fake_volume.id) mock_service.list_backups.assert_called_once_with(True) mock_service.delete_volume.assert_called_once_with(fake_volume) mock_service.delete_backup.assert_called_once_with(fake_backup) def test_create_and_list_volume_backups_no_delete(self): mock_service = self.mock_cinder.return_value volume_kwargs = {"some_var": "zaq"} scenario = volumes.CreateAndListVolumeBackups(self._get_context()) scenario.run(1, detailed=True, do_delete=False, create_volume_kwargs=volume_kwargs) fake_volume = mock_service.create_volume.return_value mock_service.create_volume.assert_called_once_with(1, **volume_kwargs) mock_service.create_backup.assert_called_once_with(fake_volume.id) mock_service.list_backups.assert_called_once_with(True) self.assertFalse(mock_service.delete_volume.called) self.assertFalse(mock_service.delete_backup.called) @ddt.data({}, {"nested_level": 2}, {"image": "img"}) @ddt.unpack def test_create_volume_and_clone(self, nested_level=1, image=None): create_volumes_count = nested_level + 1 fake_volumes = [mock.Mock(size=1) for i in range(create_volumes_count)] mock_service = self.mock_cinder.return_value mock_service.create_volume.side_effect = fake_volumes scenario = volumes.CreateVolumeAndClone(self._get_context()) scenario.run(1, image=image, nested_level=nested_level, fakearg="fake") expected = [mock.call(1, imageRef=image, fakearg="fake") if image else mock.call(1, fakearg="fake")] for i in range(nested_level): expected.append(mock.call(fake_volumes[i].size, source_volid=fake_volumes[i].id, fakearg="fake") ) self._test_atomic_action_timer(scenario.atomic_actions(), "cinder.clone_volume", count=nested_level) mock_service.create_volume.assert_has_calls(expected) def 
test_create_volume_from_snapshot(self): mock_service = self.mock_cinder.return_value create_snapshot_args = {"force": False} scenario = volumes.CreateVolumeFromSnapshot(self._get_context()) scenario.run(fakearg="f") fake_snapshot = mock_service.create_snapshot.return_value fake_volume = mock_service.create_volume.return_value mock_service.create_snapshot.assert_called_once_with("uuid") mock_service.create_volume.assert_called_once_with( 1, snapshot_id=fake_snapshot.id, fakearg="f") mock_service.delete_snapshot.assert_called_once_with(fake_snapshot) mock_service.delete_volume.assert_called_once_with(fake_volume) mock_service.create_snapshot.reset_mock() mock_service.create_volume.reset_mock() mock_service.delete_snapshot.reset_mock() mock_service.delete_volume.reset_mock() scenario.run(do_delete=False, create_snapshot_kwargs=create_snapshot_args, fakearg="f") mock_service.create_snapshot.assert_called_once_with( "uuid", **create_snapshot_args) mock_service.create_volume.assert_called_once_with( 1, snapshot_id=fake_snapshot.id, fakearg="f") self.assertFalse(mock_service.delete_snapshot.called) self.assertFalse(mock_service.delete_volume.called) @ddt.data({}, {"image": "img"}) @ddt.unpack def test_create_and_accept_transfer(self, image=None): mock_service = self.mock_cinder.return_value scenario = volumes.CreateAndAcceptTransfer(self._get_context()) scenario.run(1, image=image, fakearg="fake") expected = [mock.call(1, imageRef=image, fakearg="fake") if image else mock.call(1, fakearg="fake")] mock_service.create_volume.assert_has_calls(expected) mock_service.transfer_create.assert_called_once_with( mock_service.create_volume.return_value.id) mock_service.transfer_accept.assert_called_once_with( mock_service.transfer_create.return_value.id, auth_key=mock_service.transfer_create.return_value.auth_key)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,785
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/murano/test_utils.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally.common import cfg

from rally_openstack.task.scenarios.murano import utils
from tests.unit import test

# Dotted path of the module under test; used to build mock.patch targets.
MRN_UTILS = "rally_openstack.task.scenarios.murano.utils"
CONF = cfg.CONF


class MuranoScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for MuranoScenario and MuranoPackageManager helpers.

    Every test drives a single private helper of the scenario/utility
    classes against the mocked murano client provided by
    ``test.ScenarioTestCase`` and, where the helper is wrapped in an
    atomic action, verifies the atomic-action timer was recorded.
    """

    def test_list_environments(self):
        """_list_environments returns the client's environment list."""
        self.clients("murano").environments.list.return_value = []
        scenario = utils.MuranoScenario(context=self.context)
        return_environments_list = scenario._list_environments()
        self.assertEqual([], return_environments_list)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.list_environments")

    def test_create_environments(self):
        """_create_environment posts a randomly named environment."""
        self.clients("murano").environments.create = mock.Mock()
        scenario = utils.MuranoScenario(context=self.context)
        scenario.generate_random_name = mock.Mock()
        create_env = scenario._create_environment()
        self.assertEqual(
            create_env,
            self.clients("murano").environments.create.return_value)
        self.clients("murano").environments.create.assert_called_once_with(
            {"name": scenario.generate_random_name.return_value})
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.create_environment")

    def test_delete_environment(self):
        """_delete_environment deletes by the environment's id."""
        environment = mock.Mock(id="id")
        self.clients("murano").environments.delete.return_value = "ok"
        scenario = utils.MuranoScenario(context=self.context)
        scenario._delete_environment(environment)
        self.clients("murano").environments.delete.assert_called_once_with(
            environment.id
        )

    def test_create_session(self):
        """_create_session returns whatever sessions.configure returns."""
        self.clients("murano").sessions.configure.return_value = "sess"
        scenario = utils.MuranoScenario(context=self.context)
        create_sess = scenario._create_session("id")
        self.assertEqual("sess", create_sess)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.create_session")

    # NOTE: the original signature was ``def test__create_service(self,):``
    # — the stray trailing comma was a lint-level defect and is removed.
    def test__create_service(self):
        """_create_service posts an app into the env/session pair."""
        self.clients("murano").services.post.return_value = "app"
        mock_env = mock.Mock(id="ip")
        mock_sess = mock.Mock(id="ip")
        scenario = utils.MuranoScenario(context=self.context)
        create_app = scenario._create_service(mock_env, mock_sess,
                                              "fake_full_name")
        self.assertEqual("app", create_app)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.create_service")

    def test_deploy_environment(self):
        """_deploy_environment deploys and waits for READY status."""
        environment = mock.Mock(id="id")
        session = mock.Mock(id="id")
        self.clients("murano").sessions.deploy.return_value = "ok"
        scenario = utils.MuranoScenario(context=self.context)
        scenario._deploy_environment(environment, session)
        self.clients("murano").sessions.deploy.assert_called_once_with(
            environment.id, session.id
        )
        # The wait loop must use the murano-specific timeouts from config
        # and treat "DEPLOY FAILURE" as a terminal failure status.
        config = CONF.openstack
        self.mock_wait_for_status.mock.assert_called_once_with(
            environment,
            update_resource=self.mock_get_from_manager.mock.return_value,
            ready_statuses=["READY"],
            check_interval=config.murano_deploy_environment_check_interval,
            timeout=config.murano_deploy_environment_timeout)
        self.mock_get_from_manager.mock.assert_called_once_with(
            ["DEPLOY FAILURE"])
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.deploy_environment")

    @mock.patch(MRN_UTILS + ".open",
                side_effect=mock.mock_open(read_data="Key: value"),
                create=True)
    def test_read_from_file(self, mock_open):
        """_read_from_file parses a YAML file into a dict."""
        utility = utils.MuranoPackageManager({"uuid": "fake_task_id"})
        data = utility._read_from_file("filename")
        expected_data = {"Key": "value"}
        self.assertEqual(expected_data, data)

    @mock.patch(MRN_UTILS + ".MuranoPackageManager._read_from_file")
    @mock.patch(MRN_UTILS + ".MuranoPackageManager._write_to_file")
    def test_change_app_fullname(
            self,
            mock_murano_package_manager__write_to_file,
            mock_murano_package_manager__read_from_file):
        """_change_app_fullname rewrites the package manifest in place."""
        manifest = {"FullName": "app.name_abc",
                    "Classes": {"app.name_abc": "app_class.yaml"}}
        mock_murano_package_manager__read_from_file.side_effect = (
            [manifest])
        utility = utils.MuranoPackageManager({"uuid": "fake_task_id"})
        utility._change_app_fullname("tmp/tmpfile/")
        mock_murano_package_manager__read_from_file.assert_has_calls(
            [mock.call("tmp/tmpfile/manifest.yaml")]
        )
        mock_murano_package_manager__write_to_file.assert_has_calls(
            [mock.call(manifest, "tmp/tmpfile/manifest.yaml")]
        )

    @mock.patch("zipfile.is_zipfile")
    @mock.patch("tempfile.mkdtemp")
    @mock.patch("shutil.copytree")
    @mock.patch(MRN_UTILS + ".MuranoPackageManager._change_app_fullname")
    @mock.patch("%s.pack_dir" % MRN_UTILS)
    @mock.patch("shutil.rmtree")
    def test_prepare_zip_if_not_zip(
            self, mock_shutil_rmtree, mock_pack_dir,
            mock_murano_package_manager__change_app_fullname,
            mock_shutil_copytree, mock_tempfile_mkdtemp,
            mock_zipfile_is_zipfile):
        """A non-zip package dir is copied, renamed, packed and cleaned up."""
        utility = utils.MuranoPackageManager({"uuid": "fake_task_id"})
        package_path = "tmp/tmpfile"
        mock_zipfile_is_zipfile.return_value = False
        mock_tempfile_mkdtemp.return_value = "tmp/tmpfile"
        mock_pack_dir.return_value = "tmp/tmpzipfile"
        zip_file = utility._prepare_package(package_path)
        self.assertEqual("tmp/tmpzipfile", zip_file)
        mock_tempfile_mkdtemp.assert_called_once_with()
        mock_shutil_copytree.assert_called_once_with(
            "tmp/tmpfile",
            "tmp/tmpfile/package/"
        )
        (mock_murano_package_manager__change_app_fullname.
            assert_called_once_with("tmp/tmpfile/package/"))
        mock_shutil_rmtree.assert_called_once_with("tmp/tmpfile")

    @mock.patch("zipfile.is_zipfile")
    def test_prepare_zip_if_zip(self, mock_zipfile_is_zipfile):
        """An already-zipped package path is returned unchanged."""
        utility = utils.MuranoPackageManager({"uuid": "fake_task_id"})
        package_path = "tmp/tmpfile.zip"
        mock_zipfile_is_zipfile.return_value = True
        zip_file = utility._prepare_package(package_path)
        self.assertEqual("tmp/tmpfile.zip", zip_file)

    def test_list_packages(self):
        """_list_packages proxies the client's package listing."""
        scenario = utils.MuranoScenario()
        self.assertEqual(self.clients("murano").packages.list.return_value,
                         scenario._list_packages())
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.list_packages")

    @mock.patch(MRN_UTILS + ".open", create=True)
    def test_import_package(self, mock_open):
        """_import_package opens the zip and posts it to packages.create."""
        self.clients("murano").packages.create.return_value = (
            "created_foo_package"
        )
        scenario = utils.MuranoScenario()
        mock_open.return_value = "opened_foo_package.zip"
        imp_package = scenario._import_package("foo_package.zip")
        self.assertEqual("created_foo_package", imp_package)
        self.clients("murano").packages.create.assert_called_once_with(
            {}, {"file": "opened_foo_package.zip"})
        mock_open.assert_called_once_with("foo_package.zip")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.import_package")

    def test_delete_package(self):
        """_delete_package deletes by the package's id."""
        package = mock.Mock(id="package_id")
        scenario = utils.MuranoScenario()
        scenario._delete_package(package)
        self.clients("murano").packages.delete.assert_called_once_with(
            "package_id"
        )
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.delete_package")

    def test_update_package(self):
        """_update_package forwards id, body and operation to the client."""
        package = mock.Mock(id="package_id")
        self.clients("murano").packages.update.return_value = "updated_package"
        scenario = utils.MuranoScenario()
        upd_package = scenario._update_package(
            package, {"tags": ["tag"]}, "add"
        )
        self.assertEqual("updated_package", upd_package)
        self.clients("murano").packages.update.assert_called_once_with(
            "package_id", {"tags": ["tag"]}, "add"
        )
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.update_package")

    def test_filter_packages(self):
        """_filter_applications passes the filter dict as keyword args."""
        self.clients("murano").packages.filter.return_value = []
        scenario = utils.MuranoScenario()
        return_apps_list = scenario._filter_applications(
            {"category": "Web"}
        )
        self.assertEqual([], return_apps_list)
        self.clients("murano").packages.filter.assert_called_once_with(
            category="Web"
        )
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "murano.filter_applications")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,786
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/monasca/metrics.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.monasca import utils as monascautils


"""Scenarios for monasca Metrics API."""


@validation.add("required_services", services=[consts.Service.MONASCA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="MonascaMetrics.list_metrics", platform="openstack")
class ListMetrics(monascautils.MonascaScenario):
    """Scenario that lists the metrics visible to the current user."""

    def run(self, **kwargs):
        """Fetch user's metrics.

        :param kwargs: optional arguments for list query:
                       name, dimensions, start_time, etc
        """
        # All work is delegated to the base-class helper, which wraps the
        # monasca client call in an atomic action.
        self._list_metrics(**kwargs)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,787
openstack/rally-openstack
refs/heads/master
/tests/unit/common/services/storage/test_cinder_common.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import ddt from rally.common import cfg from rally import exceptions from rally_openstack.common import service from rally_openstack.common.services.storage import block from rally_openstack.common.services.storage import cinder_common from tests.unit import fakes from tests.unit import test BASE_PATH = "rally_openstack.common.services.storage" CONF = cfg.CONF class FullCinder(service.Service, cinder_common.CinderMixin): """Implementation of CinderMixin with Service base class.""" pass @ddt.ddt class CinderMixinTestCase(test.ScenarioTestCase): def setUp(self): super(CinderMixinTestCase, self).setUp() self.clients = mock.MagicMock() self.cinder = self.clients.cinder.return_value self.name_generator = uuid.uuid1 self.version = "some" self.service = FullCinder( clients=self.clients, name_generator=self.name_generator) self.service.version = self.version def atomic_actions(self): return self.service._atomic_actions def test__get_client(self): self.assertEqual(self.cinder, self.service._get_client()) def test__update_resource_with_manage(self): resource = mock.MagicMock(id=1, manager=mock.MagicMock()) self.assertEqual(resource.manager.get.return_value, self.service._update_resource(resource)) resource.manager.get.assert_called_once_with( resource.id) @ddt.data({"resource": block.Volume(id=1, name="vol", size=1, status="st"), "attr": "volumes"}, {"resource": 
block.VolumeSnapshot(id=2, name="snapshot", volume_id=1, status="st"), "attr": "volume_snapshots"}, {"resource": block.VolumeBackup(id=3, name="backup", volume_id=1, status="st"), "attr": "backups"}) @ddt.unpack def test__update_resource_with_no_manage(self, resource, attr): self.assertEqual(getattr(self.cinder, attr).get.return_value, self.service._update_resource(resource)) getattr(self.cinder, attr).get.assert_called_once_with( resource.id) def test__update_resource_with_not_found(self): manager = mock.MagicMock() resource = fakes.FakeResource(manager=manager, status="ERROR") class NotFoundException(Exception): http_status = 404 manager.get = mock.MagicMock(side_effect=NotFoundException) self.assertRaises(exceptions.GetResourceNotFound, self.service._update_resource, resource) def test__update_resource_with_http_exception(self): manager = mock.MagicMock() resource = fakes.FakeResource(manager=manager, status="ERROR") class HTTPException(Exception): pass manager.get = mock.MagicMock(side_effect=HTTPException) self.assertRaises(exceptions.GetResourceFailure, self.service._update_resource, resource) def test__wait_available_volume(self): volume = fakes.FakeVolume() self.assertEqual(self.mock_wait_for_status.mock.return_value, self.service._wait_available_volume(volume)) self.mock_wait_for_status.mock.assert_called_once_with( volume, ready_statuses=["available"], update_resource=self.service._update_resource, timeout=CONF.openstack.cinder_volume_create_timeout, check_interval=CONF.openstack.cinder_volume_create_poll_interval ) def test_get_volume(self): self.assertEqual(self.cinder.volumes.get.return_value, self.service.get_volume(1)) self.cinder.volumes.get.assert_called_once_with(1) @mock.patch("%s.block.BlockStorage.create_volume" % BASE_PATH) def test_delete_volume(self, mock_create_volume): volume = mock_create_volume.return_value self.service.delete_volume(volume) self.cinder.volumes.delete.assert_called_once_with(volume) 
self.mock_wait_for_status.mock.assert_called_once_with( volume, ready_statuses=["deleted"], check_deletion=True, update_resource=self.service._update_resource, timeout=CONF.openstack.cinder_volume_delete_timeout, check_interval=CONF.openstack.cinder_volume_delete_poll_interval ) @mock.patch("%s.block.BlockStorage.create_volume" % BASE_PATH) def test_extend_volume(self, mock_create_volume): volume = mock_create_volume.return_value self.service._wait_available_volume = mock.MagicMock() self.service._wait_available_volume.return_value = fakes.FakeVolume() self.assertEqual(self.service._wait_available_volume.return_value, self.service.extend_volume(volume, 1)) self.cinder.volumes.extend.assert_called_once_with(volume, 1) self.service._wait_available_volume.assert_called_once_with(volume) def test_list_snapshots(self): self.assertEqual(self.cinder.volume_snapshots.list.return_value, self.service.list_snapshots()) self.cinder.volume_snapshots.list.assert_called_once_with(True) def test_set_metadata(self): volume = fakes.FakeVolume() self.service.set_metadata(volume, sets=2, set_size=4) calls = self.cinder.volumes.set_metadata.call_args_list self.assertEqual(2, len(calls)) for call in calls: call_volume, metadata = call[0] self.assertEqual(volume, call_volume) self.assertEqual(4, len(metadata)) def test_delete_metadata(self): volume = fakes.FakeVolume() keys = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"] self.service.delete_metadata(volume, keys, deletes=3, delete_size=4) calls = self.cinder.volumes.delete_metadata.call_args_list self.assertEqual(3, len(calls)) all_deleted = [] for call in calls: call_volume, del_keys = call[0] self.assertEqual(volume, call_volume) self.assertEqual(4, len(del_keys)) for key in del_keys: self.assertIn(key, keys) self.assertNotIn(key, all_deleted) all_deleted.append(key) def test_delete_metadata_not_enough_keys(self): volume = fakes.FakeVolume() keys = ["a", "b", "c", "d", "e"] 
self.assertRaises(exceptions.InvalidArgumentsException, self.service.delete_metadata, volume, keys, deletes=2, delete_size=3) def test_update_readonly_flag(self): fake_volume = mock.MagicMock() self.service.update_readonly_flag(fake_volume, "fake_flag") self.cinder.volumes.update_readonly_flag.assert_called_once_with( fake_volume, "fake_flag") @mock.patch("rally_openstack.common.services.image.image.Image") def test_upload_volume_to_image(self, mock_image): volume = mock.Mock() image = {"os-volume_upload_image": {"image_id": 1}} self.cinder.volumes.upload_to_image.return_value = (None, image) glance = mock_image.return_value self.service.generate_random_name = mock.Mock( return_value="test_vol") self.service.upload_volume_to_image(volume, False, "container", "disk") self.cinder.volumes.upload_to_image.assert_called_once_with( volume, False, "test_vol", "container", "disk") self.mock_wait_for_status.mock.assert_has_calls([ mock.call( volume, ready_statuses=["available"], update_resource=self.service._update_resource, timeout=CONF.openstack.cinder_volume_create_timeout, check_interval=CONF.openstack. cinder_volume_create_poll_interval), mock.call( glance.get_image.return_value, ready_statuses=["active"], update_resource=glance.get_image, timeout=CONF.openstack.glance_image_create_timeout, check_interval=CONF.openstack. 
glance_image_create_poll_interval) ]) glance.get_image.assert_called_once_with(1) def test_create_qos(self): specs = {"consumer": "both", "write_iops_sec": "10", "read_iops_sec": "1000"} random_name = "random_name" self.service.generate_random_name = mock.MagicMock( return_value=random_name) result = self.service.create_qos(specs) self.assertEqual( self.cinder.qos_specs.create.return_value, result ) self.cinder.qos_specs.create.assert_called_once_with(random_name, specs) def test_list_qos(self): result = self.service.list_qos(True) self.assertEqual( self.cinder.qos_specs.list.return_value, result ) self.cinder.qos_specs.list.assert_called_once_with(True) def test_get_qos(self): result = self.service.get_qos("qos") self.assertEqual( self.cinder.qos_specs.get.return_value, result) self.cinder.qos_specs.get.assert_called_once_with("qos") def test_set_qos(self): set_specs_args = {"test": "foo"} result = self.service.set_qos("qos", set_specs_args) self.assertEqual( self.cinder.qos_specs.set_keys.return_value, result) self.cinder.qos_specs.set_keys.assert_called_once_with("qos", set_specs_args) def test_qos_associate_type(self): self.service.qos_associate_type("qos", "type_id") self.cinder.qos_specs.associate.assert_called_once_with( "qos", "type_id") def test_qos_disassociate_type(self): self.service.qos_disassociate_type("qos", "type_id") self.cinder.qos_specs.disassociate.assert_called_once_with( "qos", "type_id") def test_delete_snapshot(self): snapshot = mock.Mock() self.service.delete_snapshot(snapshot) self.cinder.volume_snapshots.delete.assert_called_once_with(snapshot) self.mock_wait_for_status.mock.assert_called_once_with( snapshot, ready_statuses=["deleted"], check_deletion=True, update_resource=self.service._update_resource, timeout=cfg.CONF.openstack.cinder_volume_create_timeout, check_interval=cfg.CONF.openstack .cinder_volume_create_poll_interval) def test_delete_backup(self): backup = mock.Mock() self.service.delete_backup(backup) 
self.cinder.backups.delete.assert_called_once_with(backup) self.mock_wait_for_status.mock.assert_called_once_with( backup, ready_statuses=["deleted"], check_deletion=True, update_resource=self.service._update_resource, timeout=cfg.CONF.openstack.cinder_volume_create_timeout, check_interval=cfg.CONF.openstack .cinder_volume_create_poll_interval) def test_restore_backup(self): backup = mock.Mock() self.service._wait_available_volume = mock.MagicMock() self.service._wait_available_volume.return_value = mock.Mock() return_restore = self.service.restore_backup(backup.id, None) self.cinder.restores.restore.assert_called_once_with(backup.id, None) self.cinder.volumes.get.assert_called_once_with( self.cinder.restores.restore.return_value.volume_id) self.service._wait_available_volume.assert_called_once_with( self.cinder.volumes.get.return_value) self.assertEqual(self.service._wait_available_volume.return_value, return_restore) def test_list_backups(self): return_backups_list = self.service.list_backups() self.assertEqual( self.cinder.backups.list.return_value, return_backups_list) def test_list_transfers(self): return_transfers_list = self.service.list_transfers() self.assertEqual( self.cinder.transfers.list.return_value, return_transfers_list) def test_get_volume_type(self): self.assertEqual(self.cinder.volume_types.get.return_value, self.service.get_volume_type("volume_type")) self.cinder.volume_types.get.assert_called_once_with( "volume_type") def test_delete_volume_type(self): volume_type = mock.Mock() self.service.delete_volume_type(volume_type) self.cinder.volume_types.delete.assert_called_once_with( volume_type) def test_set_volume_type_keys(self): volume_type = mock.Mock() self.assertEqual(volume_type.set_keys.return_value, self.service.set_volume_type_keys( volume_type, metadata="metadata")) volume_type.set_keys.assert_called_once_with("metadata") def test_transfer_create(self): fake_volume = mock.MagicMock() random_name = "random_name" 
self.service.generate_random_name = mock.MagicMock( return_value=random_name) result = self.service.transfer_create(fake_volume.id) self.assertEqual( self.cinder.transfers.create.return_value, result) self.cinder.transfers.create.assert_called_once_with( fake_volume.id, name=random_name) def test_transfer_create_with_name(self): fake_volume = mock.MagicMock() result = self.service.transfer_create(fake_volume.id, name="t") self.assertEqual( self.cinder.transfers.create.return_value, result) self.cinder.transfers.create.assert_called_once_with( fake_volume.id, name="t") def test_transfer_accept(self): fake_transfer = mock.MagicMock() result = self.service.transfer_accept(fake_transfer.id, "fake_key") self.assertEqual( self.cinder.transfers.accept.return_value, result) self.cinder.transfers.accept.assert_called_once_with( fake_transfer.id, "fake_key") def test_create_encryption_type(self): volume_type = mock.Mock() specs = { "provider": "foo_pro", "cipher": "foo_cip", "key_size": 512, "control_location": "foo_con" } result = self.service.create_encryption_type(volume_type, specs) self.assertEqual( self.cinder.volume_encryption_types.create.return_value, result) self.cinder.volume_encryption_types.create.assert_called_once_with( volume_type, specs) def test_get_encryption_type(self): volume_type = mock.Mock() result = self.service.get_encryption_type(volume_type) self.assertEqual( self.cinder.volume_encryption_types.get.return_value, result) self.cinder.volume_encryption_types.get.assert_called_once_with( volume_type) def test_list_encryption_type(self): return_encryption_types_list = self.service.list_encryption_type() self.assertEqual(self.cinder.volume_encryption_types.list.return_value, return_encryption_types_list) def test_delete_encryption_type(self): resp = mock.MagicMock(status_code=202) self.cinder.volume_encryption_types.delete.return_value = [resp] self.service.delete_encryption_type("type") 
self.cinder.volume_encryption_types.delete.assert_called_once_with( "type") def test_delete_encryption_type_raise(self): resp = mock.MagicMock(status_code=404) self.cinder.volume_encryption_types.delete.return_value = [resp] self.assertRaises(exceptions.RallyException, self.service.delete_encryption_type, "type") self.cinder.volume_encryption_types.delete.assert_called_once_with( "type") def test_update_encryption_type(self): volume_type = mock.Mock() specs = { "provider": "foo_pro", "cipher": "foo_cip", "key_size": 512, "control_location": "foo_con" } result = self.service.update_encryption_type(volume_type, specs) self.assertEqual( self.cinder.volume_encryption_types.update.return_value, result) self.cinder.volume_encryption_types.update.assert_called_once_with( volume_type, specs) class FullUnifiedCinder(cinder_common.UnifiedCinderMixin, service.Service): """Implementation of UnifiedCinderMixin with Service base class.""" pass class UnifiedCinderMixinTestCase(test.TestCase): def setUp(self): super(UnifiedCinderMixinTestCase, self).setUp() self.clients = mock.MagicMock() self.name_generator = mock.MagicMock() self.impl = mock.MagicMock() self.version = "some" self.service = FullUnifiedCinder( clients=self.clients, name_generator=self.name_generator) self.service._impl = self.impl self.service.version = self.version def test__unify_backup(self): class SomeBackup(object): id = 1 name = "backup" volume_id = "volume" status = "st" backup = self.service._unify_backup(SomeBackup()) self.assertEqual(1, backup.id) self.assertEqual("backup", backup.name) self.assertEqual("volume", backup.volume_id) self.assertEqual("st", backup.status) def test__unify_transfer(self): class SomeTransfer(object): id = 1 name = "transfer" volume_id = "volume" status = "st" transfer = self.service._unify_backup(SomeTransfer()) self.assertEqual(1, transfer.id) self.assertEqual("transfer", transfer.name) self.assertEqual("volume", transfer.volume_id) self.assertEqual("st", transfer.status) def 
test__unify_qos(self): class Qos(object): id = 1 name = "qos" specs = {"key1": "value1"} qos = self.service._unify_qos(Qos()) self.assertEqual(1, qos.id) self.assertEqual("qos", qos.name) self.assertEqual({"key1": "value1"}, qos.specs) def test__unify_encryption_type(self): class SomeEncryptionType(object): encryption_id = 1 volume_type_id = "volume_type" encryption_type = self.service._unify_encryption_type( SomeEncryptionType()) self.assertEqual(1, encryption_type.id) self.assertEqual("volume_type", encryption_type.volume_type_id) def test_delete_volume(self): self.service.delete_volume("volume") self.service._impl.delete_volume.assert_called_once_with("volume") def test_set_metadata(self): self.assertEqual( self.service._impl.set_metadata.return_value, self.service.set_metadata("volume", sets=10, set_size=3)) self.service._impl.set_metadata.assert_called_once_with( "volume", set_size=3, sets=10) def test_delete_metadata(self): keys = ["a", "b"] self.service.delete_metadata("volume", keys=keys, deletes=10, delete_size=3) self.service._impl.delete_metadata.assert_called_once_with( "volume", keys=keys, delete_size=3, deletes=10) def test_update_readonly_flag(self): self.assertEqual( self.service._impl.update_readonly_flag.return_value, self.service.update_readonly_flag("volume", read_only=True)) self.service._impl.update_readonly_flag.assert_called_once_with( "volume", read_only=True) def test_upload_volume_to_image(self): self.assertEqual( self.service._impl.upload_volume_to_image.return_value, self.service.upload_volume_to_image("volume", force=False, container_format="bare", disk_format="raw")) self.service._impl.upload_volume_to_image.assert_called_once_with( "volume", container_format="bare", disk_format="raw", force=False) def test_create_qos(self): specs = {"consumer": "both", "write_iops_sec": "10", "read_iops_sec": "1000"} self.service._unify_qos = mock.MagicMock() self.assertEqual( self.service._unify_qos.return_value, self.service.create_qos(specs) ) 
self.service._impl.create_qos.assert_called_once_with(specs) self.service._unify_qos.assert_called_once_with( self.service._impl.create_qos.return_value ) def test_list_qos(self): self.service._unify_qos = mock.MagicMock() self.service._impl.list_qos.return_value = ["qos"] self.assertEqual( [self.service._unify_qos.return_value], self.service.list_qos(True) ) self.service._impl.list_qos.assert_called_once_with(True) self.service._unify_qos.assert_called_once_with("qos") def test_get_qos(self): self.service._unify_qos = mock.MagicMock() self.assertEqual( self.service._unify_qos.return_value, self.service.get_qos("qos")) self.service._impl.get_qos.assert_called_once_with("qos") self.service._unify_qos.assert_called_once_with( self.service._impl.get_qos.return_value ) def test_set_qos(self): set_specs_args = {"test": "foo"} self.service._unify_qos = mock.MagicMock() qos = mock.MagicMock() self.assertEqual( self.service._unify_qos.return_value, self.service.set_qos(qos, set_specs_args)) self.service._impl.set_qos.assert_called_once_with(qos.id, set_specs_args) self.service._unify_qos.assert_called_once_with(qos) def test_qos_associate_type(self): self.service._unify_qos = mock.MagicMock() self.assertEqual( self.service._unify_qos.return_value, self.service.qos_associate_type("qos", "type_id")) self.service._impl.qos_associate_type.assert_called_once_with( "qos", "type_id") self.service._unify_qos.assert_called_once_with("qos") def test_qos_disassociate_type(self): self.service._unify_qos = mock.MagicMock() self.assertEqual( self.service._unify_qos.return_value, self.service.qos_disassociate_type("qos", "type_id")) self.service._impl.qos_disassociate_type.assert_called_once_with( "qos", "type_id") self.service._unify_qos.assert_called_once_with("qos") def test_delete_snapshot(self): self.service.delete_snapshot("snapshot") self.service._impl.delete_snapshot.assert_called_once_with("snapshot") def test_delete_backup(self): self.service.delete_backup("backup") 
self.service._impl.delete_backup.assert_called_once_with("backup") def test_list_backups(self): self.service._unify_backup = mock.MagicMock() self.service._impl.list_backups.return_value = ["backup"] self.assertEqual([self.service._unify_backup.return_value], self.service.list_backups(detailed=True)) self.service._impl.list_backups.assert_called_once_with(detailed=True) self.service._unify_backup.assert_called_once_with( "backup") def test_list_transfers(self): self.service._unify_transfer = mock.MagicMock() self.service._impl.list_transfers.return_value = ["transfer"] self.assertEqual( [self.service._unify_transfer.return_value], self.service.list_transfers(detailed=True, search_opts=None)) self.service._impl.list_transfers.assert_called_once_with( detailed=True, search_opts=None) self.service._unify_transfer.assert_called_once_with( "transfer") def test_update_volume_type(self): self.assertEqual(self.service._impl.update_volume_type.return_value, self.service.update_volume_type("volume_type")) self.service._impl.update_volume_type.assert_called_once_with( volume_type="volume_type", name=None, description=None, is_public=None ) def test_get_volume_type(self): self.assertEqual(self.service._impl.get_volume_type.return_value, self.service.get_volume_type("volume_type")) self.service._impl.get_volume_type.assert_called_once_with( "volume_type") def test_delete_volume_type(self): self.assertEqual(self.service._impl.delete_volume_type.return_value, self.service.delete_volume_type("volume_type")) self.service._impl.delete_volume_type.assert_called_once_with( "volume_type") def test_add_type_access(self): self.assertEqual(self.service._impl.add_type_access.return_value, self.service.add_type_access(volume_type="some_type", project="some_project")) self.service._impl.add_type_access.assert_called_once_with( volume_type="some_type", project="some_project") def test_list_type_access(self): self.assertEqual(self.service._impl.list_type_access.return_value, 
self.service.list_type_access("some_type")) self.service._impl.list_type_access.assert_called_once_with( "some_type") def test_set_volume_type_keys(self): self.assertEqual(self.service._impl.set_volume_type_keys.return_value, self.service.set_volume_type_keys( "volume_type", metadata="metadata")) self.service._impl.set_volume_type_keys.assert_called_once_with( "volume_type", "metadata") def test_transfer_create(self): self.service._unify_transfer = mock.MagicMock() self.assertEqual(self.service._unify_transfer.return_value, self.service.transfer_create(1)) self.service._impl.transfer_create.assert_called_once_with( 1, name=None) self.service._unify_transfer.assert_called_once_with( self.service._impl.transfer_create.return_value) def test_transfer_accept(self): self.service._unify_transfer = mock.MagicMock() self.assertEqual(self.service._unify_transfer.return_value, self.service.transfer_accept(1, auth_key=2)) self.service._impl.transfer_accept.assert_called_once_with( 1, auth_key=2) self.service._unify_transfer.assert_called_once_with( self.service._impl.transfer_accept.return_value) def test_create_encryption_type(self): self.service._unify_encryption_type = mock.MagicMock() self.assertEqual( self.service._unify_encryption_type.return_value, self.service.create_encryption_type("type", specs=2)) self.service._impl.create_encryption_type.assert_called_once_with( "type", specs=2) self.service._unify_encryption_type.assert_called_once_with( self.service._impl.create_encryption_type.return_value) def test_get_encryption_type(self): self.service._unify_encryption_type = mock.MagicMock() self.assertEqual( self.service._unify_encryption_type.return_value, self.service.get_encryption_type("type")) self.service._impl.get_encryption_type.assert_called_once_with( "type") self.service._unify_encryption_type.assert_called_once_with( self.service._impl.get_encryption_type.return_value) def test_list_encryption_type(self): self.service._unify_encryption_type = mock.MagicMock() 
self.service._impl.list_encryption_type.return_value = ["encryption"] self.assertEqual([self.service._unify_encryption_type.return_value], self.service.list_encryption_type(search_opts=None)) self.service._impl.list_encryption_type.assert_called_once_with( search_opts=None) self.service._unify_encryption_type.assert_called_once_with( "encryption") def test_delete_encryption_type(self): self.service.delete_encryption_type("type") self.service._impl.delete_encryption_type.assert_called_once_with( "type") def test_update_encryption_type(self): self.service.update_encryption_type("type", specs=3) self.service._impl.update_encryption_type.assert_called_once_with( "type", specs=3)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,788
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/neutron/loadbalancer_v2.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.neutron import utils


"""Scenarios for Neutron Loadbalancer v2."""


@validation.add("required_neutron_extensions", extensions=["lbaasv2"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
# NOTE(review): the original had contexts=("network"), which is just the
# string "network" — a one-element tuple requires a trailing comma.  The
# explicit tuple is unambiguous and remains backward-compatible with the
# validator's handling of a bare context name.
@validation.add("required_contexts", contexts=("network",))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV2.create_and_list_loadbalancers",
                    platform="openstack")
class CreateAndListLoadbalancers(utils.NeutronScenario):

    def run(self, lb_create_args=None):
        """Create a loadbalancer(v2) and then list loadbalancers(v2).

        Measure the "neutron lbaas-loadbalancer-list" command performance.
        The scenario creates a loadbalancer for every subnet of every
        tenant network found in the context, then lists all loadbalancers.

        :param lb_create_args: dict, POST /lbaas/loadbalancers request
                               options
        """
        lb_create_args = lb_create_args or {}
        # Collect every subnet id from the networks the Network context
        # created for this tenant; missing keys degrade to "no subnets"
        # rather than raising.
        subnets = []
        networks = self.context.get("tenant", {}).get("networks", [])
        for network in networks:
            subnets.extend(network.get("subnets", []))
        for subnet_id in subnets:
            self._create_lbaasv2_loadbalancer(subnet_id, **lb_create_args)
        self._list_lbaasv2_loadbalancers()
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,789
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/nova/utils.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.common import cfg from rally.common import logging from rally import exceptions from rally.task import atomic from rally.task import utils from rally_openstack.common.services.image import image as image_service from rally_openstack.task import scenario from rally_openstack.task.scenarios.cinder import utils as cinder_utils from rally_openstack.task.scenarios.neutron import utils as neutron_utils CONF = cfg.CONF LOG = logging.getLogger(__file__) class NovaScenario(neutron_utils.NeutronBaseScenario, scenario.OpenStackScenario): """Base class for Nova scenarios with basic atomic actions.""" @atomic.action_timer("nova.list_servers") def _list_servers(self, detailed=True): """Returns user servers list.""" return self.clients("nova").servers.list(detailed) def _pick_random_nic(self): """Choose one network from existing ones.""" ctxt = self.context nets = [net["id"] for net in ctxt.get("tenant", {}).get("networks", [])] if nets: # NOTE(amaretskiy): Balance servers among networks. 
net_idx = self.context["iteration"] % len(nets) return [{"net-id": nets[net_idx]}] def _get_network_id(self, net_name): networks = getattr(self, "existed_networks", []) if not networks: networks = self.clients("neutron").list_networks()["networks"] self.existed_networks = networks for net in networks: if net["name"] == net_name: return net["id"] raise exceptions.NotFoundException( message="Network %s not found." % net_name) def _boot_server(self, image, flavor, auto_assign_nic=False, **kwargs): """Boot a server. Returns when the server is actually booted and in "ACTIVE" state. If multiple networks created by Network context are present, the first network found that isn't associated with a floating IP pool is used. :param image: image ID or instance for server creation :param flavor: int, flavor ID or instance for server creation :param auto_assign_nic: bool, whether or not to auto assign NICs :param kwargs: other optional parameters to initialize the server :returns: nova Server instance """ server_name = self.generate_random_name() secgroup = self.context.get("user", {}).get("secgroup") if secgroup: if "security_groups" not in kwargs: kwargs["security_groups"] = [secgroup["name"]] elif secgroup["name"] not in kwargs["security_groups"]: kwargs["security_groups"].append(secgroup["name"]) if auto_assign_nic and not kwargs.get("nics", False): nic = self._pick_random_nic() if nic: kwargs["nics"] = nic if "nics" not in kwargs and\ "tenant" in self.context and\ "networks" in self.context["tenant"]: kwargs["nics"] = [ {"net-id": self.context["tenant"]["networks"][0]["id"]}] for nic in kwargs.get("nics", []): if not nic.get("net-id") and nic.get("net-name"): nic["net-id"] = self._get_network_id(nic["net-name"]) with atomic.ActionTimer(self, "nova.boot_server"): server = self.clients("nova").servers.create( server_name, image, flavor, **kwargs) self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay) server = utils.wait_for_status( server, 
ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_boot_timeout, check_interval=CONF.openstack.nova_server_boot_poll_interval ) return server def _do_server_reboot(self, server, reboottype): server.reboot(reboot_type=reboottype) self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_reboot_timeout, check_interval=CONF.openstack.nova_server_reboot_poll_interval ) @atomic.action_timer("nova.soft_reboot_server") def _soft_reboot_server(self, server): """Reboot a server with soft reboot. A soft reboot will be issued on the given server upon which time this method will wait for the server to become active. :param server: The server to reboot. """ self._do_server_reboot(server, "SOFT") @atomic.action_timer("nova.show_server") def _show_server(self, server): """Show server details. :param server: The server to get details for. :returns: Server details """ return self.clients("nova").servers.get(server) @atomic.action_timer("nova.get_console_output_server") def _get_server_console_output(self, server, length=None): """Get text of a console log output from a server. :param server: The server whose console output to retrieve :param length: The number of tail log lines you would like to retrieve. :returns: Text console output from server """ return self.clients("nova").servers.get_console_output(server, length=length) @atomic.action_timer("nova.get_console_url_server") def _get_console_url_server(self, server, console_type): """Retrieve a console url of a server. :param server: server to get console url for :param console_type: type can be novnc/xvpvnc for protocol vnc; spice-html5 for protocol spice; rdp-html5 for protocol rdp; serial for protocol serial. webmks for protocol mks (since version 2.8). 
:returns: An instance of novaclient.base.DictWithMeta """ return self.clients("nova").servers.get_console_url(server, console_type) @atomic.action_timer("nova.reboot_server") def _reboot_server(self, server): """Reboot a server with hard reboot. A reboot will be issued on the given server upon which time this method will wait for the server to become active. :param server: The server to reboot. """ self._do_server_reboot(server, "HARD") @atomic.action_timer("nova.rebuild_server") def _rebuild_server(self, server, image, **kwargs): """Rebuild a server with a new image. :param server: The server to rebuild. :param image: The new image to rebuild the server with. :param kwargs: Optional additional arguments to pass to the rebuild """ server.rebuild(image, **kwargs) self.sleep_between(CONF.openstack.nova_server_rebuild_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_rebuild_timeout, check_interval=CONF.openstack.nova_server_rebuild_poll_interval ) @atomic.action_timer("nova.start_server") def _start_server(self, server): """Start the given server. A start will be issued for the given server upon which time this method will wait for it to become ACTIVE. :param server: The server to start and wait to become ACTIVE. """ server.start() utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_start_timeout, check_interval=CONF.openstack.nova_server_start_poll_interval ) @atomic.action_timer("nova.stop_server") def _stop_server(self, server): """Stop the given server. Issues a stop on the given server and waits for the server to become SHUTOFF. :param server: The server to stop. 
""" server.stop() utils.wait_for_status( server, ready_statuses=["SHUTOFF"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_stop_timeout, check_interval=CONF.openstack.nova_server_stop_poll_interval ) @atomic.action_timer("nova.rescue_server") def _rescue_server(self, server): """Rescue the given server. Returns when the server is actually rescue and is in the "Rescue" state. :param server: Server object """ server.rescue() self.sleep_between(CONF.openstack.nova_server_rescue_prepoll_delay) utils.wait_for_status( server, ready_statuses=["RESCUE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_rescue_timeout, check_interval=CONF.openstack.nova_server_rescue_poll_interval ) @atomic.action_timer("nova.unrescue_server") def _unrescue_server(self, server): """Unrescue the given server. Returns when the server is unrescue and waits to become ACTIVE :param server: Server object """ server.unrescue() self.sleep_between(CONF.openstack.nova_server_unrescue_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_unrescue_timeout, check_interval=CONF.openstack.nova_server_unrescue_poll_interval ) @atomic.action_timer("nova.suspend_server") def _suspend_server(self, server): """Suspends the given server. Returns when the server is actually suspended and is in the "Suspended" state. :param server: Server object """ server.suspend() self.sleep_between(CONF.openstack.nova_server_suspend_prepoll_delay) utils.wait_for_status( server, ready_statuses=["SUSPENDED"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_suspend_timeout, check_interval=CONF.openstack.nova_server_suspend_poll_interval ) @atomic.action_timer("nova.resume_server") def _resume_server(self, server): """Resumes the suspended server. Returns when the server is actually resumed and is in the "ACTIVE" state. 
:param server: Server object """ server.resume() self.sleep_between(CONF.openstack.nova_server_resume_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_resume_timeout, check_interval=CONF.openstack.nova_server_resume_poll_interval ) @atomic.action_timer("nova.pause_server") def _pause_server(self, server): """Pause the live server. Returns when the server is actually paused and is in the "PAUSED" state. :param server: Server object """ server.pause() self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) utils.wait_for_status( server, ready_statuses=["PAUSED"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_pause_timeout, check_interval=CONF.openstack.nova_server_pause_poll_interval ) @atomic.action_timer("nova.unpause_server") def _unpause_server(self, server): """Unpause the paused server. Returns when the server is actually unpaused and is in the "ACTIVE" state. :param server: Server object """ server.unpause() self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_unpause_timeout, check_interval=CONF.openstack.nova_server_unpause_poll_interval ) @atomic.action_timer("nova.shelve_server") def _shelve_server(self, server): """Shelve the given server. Returns when the server is actually shelved and is in the "SHELVED_OFFLOADED" state. 
:param server: Server object """ server.shelve() self.sleep_between(CONF.openstack.nova_server_pause_prepoll_delay) utils.wait_for_status( server, ready_statuses=["SHELVED_OFFLOADED"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_shelve_timeout, check_interval=CONF.openstack.nova_server_shelve_poll_interval ) utils.wait_for_status( server, ready_statuses=["None"], status_attr="OS-EXT-STS:task_state", update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_shelve_timeout, check_interval=CONF.openstack.nova_server_shelve_poll_interval ) @atomic.action_timer("nova.unshelve_server") def _unshelve_server(self, server): """Unshelve the given server. Returns when the server is unshelved and is in the "ACTIVE" state. :param server: Server object """ server.unshelve() self.sleep_between(CONF.openstack. nova_server_unshelve_prepoll_delay) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_unshelve_timeout, check_interval=CONF.openstack.nova_server_unshelve_poll_interval ) def _delete_server(self, server, force=False): """Delete the given server. Returns when the server is actually deleted. :param server: Server object :param force: If True, force_delete will be used instead of delete. """ atomic_name = ("nova.%sdelete_server") % (force and "force_" or "") with atomic.ActionTimer(self, atomic_name): if force: server.force_delete() else: server.delete() utils.wait_for_status( server, ready_statuses=["deleted"], check_deletion=True, update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_delete_timeout, check_interval=CONF.openstack.nova_server_delete_poll_interval ) def _delete_servers(self, servers, force=False): """Delete multiple servers. :param servers: A list of servers to delete :param force: If True, force_delete will be used instead of delete. 
""" atomic_name = ("nova.%sdelete_servers") % (force and "force_" or "") with atomic.ActionTimer(self, atomic_name): for server in servers: if force: server.force_delete() else: server.delete() for server in servers: utils.wait_for_status( server, ready_statuses=["deleted"], check_deletion=True, update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_delete_timeout, check_interval=( CONF.openstack.nova_server_delete_poll_interval) ) @atomic.action_timer("nova.create_server_group") def _create_server_group(self, **kwargs): """Create (allocate) a server group. :param kwargs: Optional additional arguments for Server group creating :returns: Nova server group """ group_name = self.generate_random_name() return self.clients("nova").server_groups.create(name=group_name, **kwargs) @atomic.action_timer("nova.get_server_group") def _get_server_group(self, id): """Get a specific server group. :param id: Unique ID of the server group to get :rtype: :class:`ServerGroup` """ return self.clients("nova").server_groups.get(id) @atomic.action_timer("nova.list_server_groups") def _list_server_groups(self, all_projects=False): """Get a list of all server groups. :param all_projects: If True, display server groups from all projects(Admin only) :rtype: list of :class:`ServerGroup`. """ if all_projects: return self.admin_clients("nova").server_groups.list(all_projects) else: return self.clients("nova").server_groups.list(all_projects) @atomic.action_timer("nova.delete_server_group") def _delete_server_group(self, group_id): """Delete a specific server group. :param id: The ID of the :class:`ServerGroup` to delete :returns: An instance of novaclient.base.TupleWithMeta """ return self.clients("nova").server_groups.delete(group_id) @atomic.action_timer("nova.delete_image") def _delete_image(self, image): """Delete the given image. Returns when the image is actually deleted. 
:param image: Image object """ LOG.warning("Method '_delete_image' of NovaScenario class is " "deprecated since Rally 0.10.0. Use GlanceUtils instead.") glance = image_service.Image(self._clients, atomic_inst=self.atomic_actions()) glance.delete_image(image.id) check_interval = CONF.openstack.nova_server_image_delete_poll_interval with atomic.ActionTimer(self, "glance.wait_for_delete"): utils.wait_for_status( image, ready_statuses=["deleted", "pending_delete"], check_deletion=True, update_resource=glance.get_image, timeout=CONF.openstack.nova_server_image_delete_timeout, check_interval=check_interval ) @atomic.action_timer("nova.snapshot_server") def _create_image(self, server): """Create an image from the given server Uses the server name to name the created image. Returns when the image is actually created and is in the "Active" state. :param server: Server object for which the image will be created :returns: Created image object """ image_uuid = self.clients("nova").servers.create_image(server, server.name) glance = image_service.Image(self._clients, atomic_inst=self.atomic_actions()) image = glance.get_image(image_uuid) check_interval = CONF.openstack.nova_server_image_create_poll_interval with atomic.ActionTimer(self, "glance.wait_for_image"): image = utils.wait_for_status( image, ready_statuses=["ACTIVE"], update_resource=glance.get_image, timeout=CONF.openstack.nova_server_image_create_timeout, check_interval=check_interval ) with atomic.ActionTimer(self, "nova.wait_for_server"): utils.wait_for_status( server, ready_statuses=["None"], status_attr="OS-EXT-STS:task_state", update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_image_create_timeout, check_interval=check_interval ) return image @atomic.action_timer("nova.get_keypair") def _get_keypair(self, keypair): """Get a keypair. :param keypair: The ID of the keypair to get. 
:rtype: :class:`Keypair` """ return self.clients("nova").keypairs.get(keypair) @atomic.action_timer("nova.create_keypair") def _create_keypair(self, **kwargs): """Create a keypair :returns: Created keypair name """ keypair_name = self.generate_random_name() keypair = self.clients("nova").keypairs.create(keypair_name, **kwargs) return keypair.name @atomic.action_timer("nova.list_keypairs") def _list_keypairs(self): """Return user keypairs list.""" return self.clients("nova").keypairs.list() @atomic.action_timer("nova.delete_keypair") def _delete_keypair(self, keypair_name): """Delete keypair :param keypair_name: The keypair name to delete. """ self.clients("nova").keypairs.delete(keypair_name) def _boot_servers(self, image_id, flavor_id, requests, instances_amount=1, auto_assign_nic=False, **kwargs): """Boot multiple servers. Returns when all the servers are actually booted and are in the "Active" state. :param image_id: ID of the image to be used for server creation :param flavor_id: ID of the flavor to be used for server creation :param requests: Number of booting requests to perform :param instances_amount: Number of instances to boot per each request :param auto_assign_nic: bool, whether or not to auto assign NICs :param kwargs: other optional parameters to initialize the servers :returns: List of created server objects """ if auto_assign_nic and not kwargs.get("nics", False): nic = self._pick_random_nic() if nic: kwargs["nics"] = nic for nic in kwargs.get("nics", []): if not nic.get("net-id") and nic.get("net-name"): nic["net-id"] = self._get_network_id(nic["net-name"]) name_prefix = self.generate_random_name() with atomic.ActionTimer(self, "nova.boot_servers"): for i in range(requests): self.clients("nova").servers.create( "%s_%d" % (name_prefix, i), image_id, flavor_id, min_count=instances_amount, max_count=instances_amount, **kwargs) # NOTE(msdubov): Nova python client returns only one server even # when min_count > 1, so we have to rediscover # all the 
created servers manually. servers = [s for s in self.clients("nova").servers.list() if s.name.startswith(name_prefix)] self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay) servers = [utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils. get_from_manager(), timeout=CONF.openstack.nova_server_boot_timeout, check_interval=CONF.openstack.nova_server_boot_poll_interval ) for server in servers] return servers @atomic.action_timer("nova.associate_floating_ip") def _associate_floating_ip(self, server, address, fixed_address=None): """Add floating IP to an instance :param server: The :class:`Server` to add an IP to. :param address: The dict-like representation of FloatingIP to add to the instance :param fixed_address: The fixedIP address the FloatingIP is to be associated with (optional) """ if isinstance(address, dict): floating_ip = self.neutron.associate_floatingip( device_id=server.id, fixed_ip_address=fixed_address, floatingip_id=address["id"]) else: floating_ip = self.neutron.associate_floatingip( device_id=server.id, fixed_ip_address=fixed_address, floating_ip_address=address) utils.wait_for(server, is_ready=self.check_ip_address( floating_ip["floating_ip_address"]), update_resource=utils.get_from_manager()) # Update server data server.addresses = server.manager.get(server.id).addresses @atomic.action_timer("nova.dissociate_floating_ip") def _dissociate_floating_ip(self, server, address): """Remove floating IP from an instance :param server: The :class:`Server` to add an IP to. 
:param address: The dict-like representation of FloatingIP to remove """ if isinstance(address, dict): floating_ip = self.neutron.dissociate_floatingip( floatingip_id=address["id"] ) else: floating_ip = self.neutron.dissociate_floatingip( floating_ip_address=address ) utils.wait_for( server, is_ready=self.check_ip_address( floating_ip["floating_ip_address"], must_exist=False), update_resource=utils.get_from_manager() ) # Update server data server.addresses = server.manager.get(server.id).addresses @staticmethod def check_ip_address(address, must_exist=True): ip_to_check = getattr(address, "ip", address) def _check_addr(resource): for network, addr_list in resource.addresses.items(): for addr in addr_list: if ip_to_check == addr["addr"]: return must_exist return not must_exist return _check_addr @atomic.action_timer("nova.resize") def _resize(self, server, flavor): server.resize(flavor) utils.wait_for_status( server, ready_statuses=["VERIFY_RESIZE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_resize_timeout, check_interval=CONF.openstack.nova_server_resize_poll_interval ) @atomic.action_timer("nova.resize_confirm") def _resize_confirm(self, server, status="ACTIVE"): server.confirm_resize() utils.wait_for_status( server, ready_statuses=[status], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_resize_confirm_timeout, check_interval=( CONF.openstack.nova_server_resize_confirm_poll_interval) ) @atomic.action_timer("nova.resize_revert") def _resize_revert(self, server, status="ACTIVE"): server.revert_resize() utils.wait_for_status( server, ready_statuses=[status], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_resize_revert_timeout, check_interval=( CONF.openstack.nova_server_resize_revert_poll_interval) ) def _update_volume_resource(self, resource): cinder_service = cinder_utils.CinderBasic(self.context) return cinder_service.cinder.get_volume(resource.id) 
@atomic.action_timer("nova.attach_volume") def _attach_volume(self, server, volume, device=None): server_id = server.id volume_id = volume.id attachment = self.clients("nova").volumes.create_server_volume( server_id, volume_id, device) utils.wait_for_status( volume, ready_statuses=["in-use"], update_resource=self._update_volume_resource, timeout=CONF.openstack.nova_server_resize_revert_timeout, check_interval=( CONF.openstack.nova_server_resize_revert_poll_interval) ) return attachment @atomic.action_timer("nova.list_attachments") def _list_attachments(self, server_id): """Get a list of all the attached volumes for the given server ID. :param server_id: The ID of the server :rtype: list of :class:`Volume` """ return self.clients("nova").volumes.get_server_volumes(server_id) @atomic.action_timer("nova.detach_volume") def _detach_volume(self, server, volume, attachment=None): """Detach volume from the server. :param server: A server object to detach volume from. :param volume: A volume object to detach from the server. :param attachment: DEPRECATED """ if attachment: LOG.warning("An argument `attachment` of `_detach_volume` is " "deprecated in favor of `volume` argument since " "Rally 0.10.0") server_id = server.id self.clients("nova").volumes.delete_server_volume(server_id, volume.id) utils.wait_for_status( volume, ready_statuses=["available"], update_resource=self._update_volume_resource, timeout=CONF.openstack.nova_detach_volume_timeout, check_interval=CONF.openstack.nova_detach_volume_poll_interval ) @atomic.action_timer("nova.live_migrate") def _live_migrate(self, server, block_migration=False, disk_over_commit=False, skip_compute_nodes_check=False, skip_host_check=False): """Run live migration of the given server. 
:param server: Server object :param block_migration: Specifies the migration type :param disk_over_commit: Specifies whether to overcommit migrated instance or not :param skip_compute_nodes_check: Specifies whether to verify the number of compute nodes :param skip_host_check: Specifies whether to verify the targeted host availability """ if not skip_compute_nodes_check: compute_nodes = len(self._list_hypervisors()) if compute_nodes < 2: raise exceptions.RallyException("Less than 2 compute nodes," " skipping Live Migration") server_admin = self.admin_clients("nova").servers.get(server.id) host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") server_admin.live_migrate(block_migration=block_migration, disk_over_commit=disk_over_commit) utils.wait_for_status( server, ready_statuses=["ACTIVE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_live_migrate_timeout, check_interval=( CONF.openstack.nova_server_live_migrate_poll_interval) ) if not skip_host_check: server_admin = self.admin_clients("nova").servers.get(server.id) host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") if host_pre_migrate == host_after_migrate: raise exceptions.RallyException( "Live Migration failed: Migration complete " "but instance did not change host: %s" % host_pre_migrate) @atomic.action_timer("nova.migrate") def _migrate(self, server, skip_compute_nodes_check=False, skip_host_check=False): """Run migration of the given server. 
:param server: Server object :param skip_compute_nodes_check: Specifies whether to verify the number of compute nodes :param skip_host_check: Specifies whether to verify the targeted host availability """ if not skip_compute_nodes_check: compute_nodes = len(self._list_hypervisors()) if compute_nodes < 2: raise exceptions.RallyException("Less than 2 compute nodes," " skipping Migration") server_admin = self.admin_clients("nova").servers.get(server.id) host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") server_admin.migrate() utils.wait_for_status( server, ready_statuses=["VERIFY_RESIZE"], update_resource=utils.get_from_manager(), timeout=CONF.openstack.nova_server_migrate_timeout, check_interval=( CONF.openstack.nova_server_migrate_poll_interval) ) if not skip_host_check: server_admin = self.admin_clients("nova").servers.get(server.id) host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") if host_pre_migrate == host_after_migrate: raise exceptions.RallyException( "Migration failed: Migration complete but instance" " did not change host: %s" % host_pre_migrate) @atomic.action_timer("nova.add_server_secgroups") def _add_server_secgroups(self, server, security_group, atomic_action=False): """add security group to a server. :param server: Server object :returns: An instance of novaclient.base.DictWithMeta """ return self.clients("nova").servers.add_security_group(server, security_group) @atomic.action_timer("nova.list_hypervisors") def _list_hypervisors(self, detailed=True): """List hypervisors.""" return self.admin_clients("nova").hypervisors.list(detailed) @atomic.action_timer("nova.statistics_hypervisors") def _statistics_hypervisors(self): """Get hypervisor statistics over all compute nodes. :returns: Hypervisor statistics """ return self.admin_clients("nova").hypervisors.statistics() @atomic.action_timer("nova.get_hypervisor") def _get_hypervisor(self, hypervisor): """Get a specific hypervisor. :param hypervisor: Hypervisor to get. 
:returns: Hypervisor object """ return self.admin_clients("nova").hypervisors.get(hypervisor) @atomic.action_timer("nova.search_hypervisors") def _search_hypervisors(self, hypervisor_match, servers=False): """List all servers belonging to specific hypervisor. :param hypervisor_match: Hypervisor's host name. :param servers: If True, server information is also retrieved. :returns: Hypervisor object """ return self.admin_clients("nova").hypervisors.search(hypervisor_match, servers=servers) @atomic.action_timer("nova.lock_server") def _lock_server(self, server): """Lock the given server. :param server: Server to lock """ server.lock() @atomic.action_timer("nova.uptime_hypervisor") def _uptime_hypervisor(self, hypervisor): """Display the uptime of the specified hypervisor. :param hypervisor: Hypervisor to get. :returns: Hypervisor object """ return self.admin_clients("nova").hypervisors.uptime(hypervisor) @atomic.action_timer("nova.unlock_server") def _unlock_server(self, server): """Unlock the given server. :param server: Server to unlock """ server.unlock() @atomic.action_timer("nova.delete_network") def _delete_network(self, net_id): """Delete nova network. :param net_id: The nova-network ID to delete """ return self.admin_clients("nova").networks.delete(net_id) @atomic.action_timer("nova.list_flavors") def _list_flavors(self, detailed=True, **kwargs): """List all flavors. :param kwargs: Optional additional arguments for flavor listing :param detailed: True if the image listing should contain detailed information :returns: flavors list """ return self.clients("nova").flavors.list(detailed, **kwargs) @atomic.action_timer("nova.set_flavor_keys") def _set_flavor_keys(self, flavor, extra_specs): """set flavor keys :param flavor: flavor to set keys :param extra_specs: additional arguments for flavor set keys """ return flavor.set_keys(extra_specs) @atomic.action_timer("nova.list_agents") def _list_agents(self, hypervisor=None): """List all nova-agent builds. 
:param hypervisor: The nova-hypervisor ID on which we need to list all the builds :returns: Nova-agent build list """ return self.admin_clients("nova").agents.list(hypervisor) @atomic.action_timer("nova.list_aggregates") def _list_aggregates(self): """Returns list of all os-aggregates.""" return self.admin_clients("nova").aggregates.list() @atomic.action_timer("nova.list_availability_zones") def _list_availability_zones(self, detailed=True): """List availability-zones. :param detailed: True if the availability-zone listing should contain detailed information :returns: Availability-zone list """ return self.admin_clients("nova").availability_zones.list(detailed) @atomic.action_timer("nova.list_interfaces") def _list_interfaces(self, server): """List interfaces attached to a server. :param server:Instance or ID of server. :returns: Server interface list """ return self.clients("nova").servers.interface_list(server) @atomic.action_timer("nova.list_services") def _list_services(self, host=None, binary=None): """return all nova service details :param host: List all nova services on host :param binary: List all nova services matching given binary """ return self.admin_clients("nova").services.list(host, binary) @atomic.action_timer("nova.create_flavor") def _create_flavor(self, ram, vcpus, disk, **kwargs): """Create a flavor :param ram: Memory in MB for the flavor :param vcpus: Number of VCPUs for the flavor :param disk: Size of local disk in GB :param kwargs: Optional additional arguments for flavor creation """ name = self.generate_random_name() return self.admin_clients("nova").flavors.create(name, ram, vcpus, disk, **kwargs) @atomic.action_timer("nova.delete_flavor") def _delete_flavor(self, flavor): """Delete a flavor :param flavor: The ID of the :class:`Flavor` :returns: An instance of novaclient.base.TupleWithMeta """ return self.admin_clients("nova").flavors.delete(flavor) @atomic.action_timer("nova.list_flavor_access") def _list_flavor_access(self, flavor): 
"""List access-rules for non-public flavor. :param flavor: List access rules for flavor instance or flavor ID """ return self.admin_clients("nova").flavor_access.list(flavor=flavor) @atomic.action_timer("nova.add_tenant_access") def _add_tenant_access(self, flavor, tenant): """Add a tenant to the given flavor access list. :param flavor: name or id of the object flavor :param tenant: id of the object tenant :returns: access rules for flavor instance or flavor ID """ return self.admin_clients("nova").flavor_access.add_tenant_access( flavor, tenant) @atomic.action_timer("nova.update_server") def _update_server(self, server, description=None): """update the server's name and description. :param server: Server object :param description: update the server description :returns: The updated server """ new_name = self.generate_random_name() if description: return server.update(name=new_name, description=description) else: return server.update(name=new_name) @atomic.action_timer("nova.get_flavor") def _get_flavor(self, flavor_id): """Show a flavor :param flavor_id: The flavor ID to get """ return self.admin_clients("nova").flavors.get(flavor_id) @atomic.action_timer("nova.create_aggregate") def _create_aggregate(self, availability_zone): """Create a new aggregate. :param availability_zone: The availability zone of the aggregate :returns: The created aggregate """ aggregate_name = self.generate_random_name() return self.admin_clients("nova").aggregates.create(aggregate_name, availability_zone) @atomic.action_timer("nova.get_aggregate_details") def _get_aggregate_details(self, aggregate): """Get details of the specified aggregate. :param aggregate: The aggregate to get details :returns: Detailed information of aggregate """ return self.admin_clients("nova").aggregates.get_details(aggregate) @atomic.action_timer("nova.delete_aggregate") def _delete_aggregate(self, aggregate): """Delete the specified aggregate. 
:param aggregate: The aggregate to delete :returns: An instance of novaclient.base.TupleWithMeta """ return self.admin_clients("nova").aggregates.delete(aggregate) def _bind_actions(self): actions = ["hard_reboot", "soft_reboot", "stop_start", "rescue_unrescue", "pause_unpause", "suspend_resume", "lock_unlock", "shelve_unshelve"] action_builder = utils.ActionBuilder(actions) action_builder.bind_action("hard_reboot", self._reboot_server) action_builder.bind_action("soft_reboot", self._soft_reboot_server) action_builder.bind_action("stop_start", self._stop_and_start_server) action_builder.bind_action("rescue_unrescue", self._rescue_and_unrescue_server) action_builder.bind_action("pause_unpause", self._pause_and_unpause_server) action_builder.bind_action("suspend_resume", self._suspend_and_resume_server) action_builder.bind_action("lock_unlock", self._lock_and_unlock_server) action_builder.bind_action("shelve_unshelve", self._shelve_and_unshelve_server) return action_builder @atomic.action_timer("nova.stop_and_start_server") def _stop_and_start_server(self, server): """Stop and then start the given server. A stop will be issued on the given server upon which time this method will wait for the server to become 'SHUTOFF'. Once the server is SHUTOFF a start will be issued and this method will wait for the server to become 'ACTIVE' again. :param server: The server to stop and then start. """ self._stop_server(server) self._start_server(server) @atomic.action_timer("nova.rescue_and_unrescue_server") def _rescue_and_unrescue_server(self, server): """Rescue and then unrescue the given server. A rescue will be issued on the given server upon which time this method will wait for the server to become 'RESCUE'. Once the server is RESCUE an unrescue will be issued and this method will wait for the server to become 'ACTIVE' again. :param server: The server to rescue and then unrescue. 
""" self._rescue_server(server) self._unrescue_server(server) @atomic.action_timer("nova.pause_and_unpause_server") def _pause_and_unpause_server(self, server): """Pause and then unpause the given server. A pause will be issued on the given server upon which time this method will wait for the server to become 'PAUSED'. Once the server is PAUSED an unpause will be issued and this method will wait for the server to become 'ACTIVE' again. :param server: The server to pause and then unpause. """ self._pause_server(server) self._unpause_server(server) @atomic.action_timer("nova.suspend_and_resume_server") def _suspend_and_resume_server(self, server): """Suspend and then resume the given server. A suspend will be issued on the given server upon which time this method will wait for the server to become 'SUSPENDED'. Once the server is SUSPENDED an resume will be issued and this method will wait for the server to become 'ACTIVE' again. :param server: The server to suspend and then resume. """ self._suspend_server(server) self._resume_server(server) @atomic.action_timer("nova.lock_and_unlock_server") def _lock_and_unlock_server(self, server): """Lock and then unlock the given server. A lock will be issued on the given server upon which time this method will wait for the server to become locked'. Once the server is locked an unlock will be issued. :param server: The server to lock and then unlock. """ self._lock_server(server) self._unlock_server(server) @atomic.action_timer("nova.shelve_and_unshelve_server") def _shelve_and_unshelve_server(self, server): """Shelve and then unshelve the given server. A shelve will be issued on the given server upon which time this method will wait for the server to become 'SHELVED'. Once the server is SHELVED an unshelve will be issued and this method will wait for the server to become 'ACTIVE' again. :param server: The server to shelve and then unshelve. 
""" self._shelve_server(server) self._unshelve_server(server) @atomic.action_timer("nova.update_aggregate") def _update_aggregate(self, aggregate): """Update the aggregate's name and availability_zone. :param aggregate: The aggregate to update :return: The updated aggregate """ aggregate_name = self.generate_random_name() availability_zone = self.generate_random_name() values = {"name": aggregate_name, "availability_zone": availability_zone} return self.admin_clients("nova").aggregates.update(aggregate, values) @atomic.action_timer("nova.aggregate_add_host") def _aggregate_add_host(self, aggregate, host): """Add a host into the Host Aggregate. :param aggregate: The aggregate add host to :param host: The host add to aggregate :returns: The aggregate that has been added host to """ return self.admin_clients("nova").aggregates.add_host(aggregate, host) @atomic.action_timer("nova.aggregate_remove_host") def _aggregate_remove_host(self, aggregate, host): """Remove a host from an aggregate. :param aggregate: The aggregate remove host from :param host: The host to remove :returns: The aggregate that has been removed host from """ return self.admin_clients("nova").aggregates.remove_host(aggregate, host) @atomic.action_timer("nova.aggregate_set_metadata") def _aggregate_set_metadata(self, aggregate, metadata): """Set metadata to an aggregate :param aggregate: The aggregate to set metadata to :param metadata: The metadata to be set :return: The aggregate that has the set metadata """ return self.admin_clients("nova").aggregates.set_metadata(aggregate, metadata) @atomic.action_timer("nova.attach_interface") def _attach_interface(self, server, port_id=None, net_id=None, fixed_ip=None): """Attach a network_interface to an instance. :param server: The :class:`Server` (or its ID) to attach to. :param port_id: The port to attach. 
:param network_id: the Network to attach :param fixed_ip: the Fix_ip to attach :returns the server that has attach interface """ return self.clients("nova").servers.interface_attach(server, port_id, net_id, fixed_ip)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,790
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/network/allow_ssh.py
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.common import validation

from rally_openstack.common.services.network import neutron
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context


LOG = logging.getLogger(__name__)


# This method is simplified version to what neutron has
def _rule_to_key(rule):
    """Build a comparison key for a security-group rule dict.

    Two rules that produce the same key are treated as duplicates, so a
    matching rule already present on the group will not be re-created.
    """
    def _normalize_rule_value(key, value):
        # This string is used as a placeholder for str(None), but shorter.
        none_char = "+"
        # Port bounds default to the full range when unspecified.
        default = {
            "port_range_min": "1",
            "port_range_max": "65535"
        }

        if key == "remote_ip_prefix":
            # Any "whole internet" prefix (v4, v6 or unset) normalizes to
            # the same placeholder so they compare equal.
            all_address = ["0.0.0.0/0", "::/0", None]
            if value in all_address:
                return none_char
        elif value is None:
            return default.get(key, none_char)
        return str(value)

    # NOTE(andreykurilin): there are more actual comparison keys, but this set
    #   should be enough for us.
    comparison_keys = [
        "ethertype",
        "direction",
        "port_range_max",
        "port_range_min",
        "protocol",
        "remote_ip_prefix"
    ]
    return "_".join([_normalize_rule_value(x, rule.get(x))
                     for x in comparison_keys])


# Ingress rules opening all TCP/UDP ports and ICMP, for both IPv4 and IPv6.
_RULES_TO_ADD = [
    {
        "ethertype": "IPv4",
        "protocol": "tcp",
        "port_range_max": 65535,
        "port_range_min": 1,
        "remote_ip_prefix": "0.0.0.0/0",
        "direction": "ingress"
    },
    {
        "ethertype": "IPv6",
        "protocol": "tcp",
        "port_range_max": 65535,
        "port_range_min": 1,
        "remote_ip_prefix": "::/0",
        "direction": "ingress"
    },
    {
        "ethertype": "IPv4",
        "protocol": "udp",
        "port_range_max": 65535,
        "port_range_min": 1,
        "remote_ip_prefix": "0.0.0.0/0",
        "direction": "ingress"
    },
    {
        "ethertype": "IPv6",
        "protocol": "udp",
        "port_range_max": 65535,
        "port_range_min": 1,
        "remote_ip_prefix": "::/0",
        "direction": "ingress"
    },
    {
        "ethertype": "IPv4",
        "protocol": "icmp",
        "remote_ip_prefix": "0.0.0.0/0",
        "direction": "ingress"
    },
    {
        "ethertype": "IPv6",
        "protocol": "ipv6-icmp",
        "remote_ip_prefix": "::/0",
        "direction": "ingress"
    }
]


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="allow_ssh", platform="openstack", order=320)
class AllowSSH(context.OpenStackContext):
    """Sets up security groups for all users to access VM via SSH."""

    def setup(self):
        # Probe extension support with the first user's client; the
        # per-tenant clients below are built separately per credential.
        client = neutron.NeutronService(
            clients=self.context["users"][0]["credential"].clients(),
            name_generator=self.generate_random_name,
            atomic_inst=self.atomic_actions()
        )
        if not client.supports_extension("security-group", silent=True):
            LOG.info("Security group context is disabled.")
            return

        secgroup_name = self.generate_random_name()
        secgroups_per_tenant = {}
        for user, tenant_id in self._iterate_per_tenants():
            client = neutron.NeutronService(
                clients=user["credential"].clients(),
                name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions()
            )
            secgroup = client.create_security_group(
                name=secgroup_name,
                description="Allow ssh access to VMs created by Rally")
            secgroups_per_tenant[tenant_id] = secgroup

            # Only add rules the backend did not already create by default.
            existing_rules = set(
                _rule_to_key(rule)
                for rule in secgroup.get("security_group_rules", []))
            for new_rule in _RULES_TO_ADD:
                if _rule_to_key(new_rule) not in existing_rules:
                    secgroup.setdefault("security_group_rules", [])
                    secgroup["security_group_rules"].append(
                        client.create_security_group_rule(
                            security_group_id=secgroup["id"], **new_rule)
                    )

        # Every user of a tenant shares the tenant's security group.
        for user in self.context["users"]:
            user["secgroup"] = secgroups_per_tenant[user["tenant_id"]]

    def cleanup(self):
        resource_manager.cleanup(
            names=["neutron.security_group"],
            admin=self.context.get("admin"),
            users=self.context["users"],
            task_id=self.get_owner_id(),
            superclass=self.__class__
        )
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,791
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/swift/test_utils.py
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task import context
from rally_openstack.task.contexts.swift import utils
from tests.unit import test


class SwiftContext(utils.SwiftObjectMixin, context.OpenStackContext):
    """Minimal concrete context used to exercise SwiftObjectMixin.

    Bypasses the base class __init__ so tests can inject a pre-built
    context dict; setup/cleanup are no-ops on purpose.
    """

    def __init__(self, context):
        self.context = context

    def setup(self):
        pass

    def cleanup(self):
        pass


class SwiftObjectMixinTestCase(test.TestCase):

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test__create_containers(self, mock_clients):
        tenants = 2
        containers_per_tenant = 2
        context = test.get_test_context()
        c = [mock.MagicMock(), mock.MagicMock()]
        context.update({
            "tenants": {
                "1001": {"name": "t1_name"},
                "1002": {"name": "t2_name"}
            },
            "users": [
                {"id": "u1", "tenant_id": "1001", "credential": c[0]},
                {"id": "u2", "tenant_id": "1002", "credential": c[1]}
            ]
        })
        mixin = SwiftContext(context)

        containers = mixin._create_containers(containers_per_tenant, 15)

        self.assertEqual(tenants * containers_per_tenant, len(containers))
        # Containers sort by tenant id, so each tenant owns a contiguous
        # chunk of ``containers_per_tenant`` entries.
        for index, container in enumerate(sorted(containers)):
            offset = int(index / containers_per_tenant) + 1
            self.assertEqual(str(1000 + offset), container[0])

        for index, tenant_id in enumerate(sorted(context["tenants"]),
                                          start=1):
            containers = context["tenants"][tenant_id]["containers"]
            self.assertEqual(containers_per_tenant, len(containers))
            for container in containers:
                self.assertEqual("u%d" % index, container["user"]["id"])
                self.assertEqual(c[index - 1],
                                 container["user"]["credential"])
                # Freshly created containers start with no objects.
                self.assertEqual(0, len(container["objects"]))

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test__create_objects(self, mock_clients):
        tenants = 2
        containers_per_tenant = 1
        objects_per_container = 5
        context = test.get_test_context()
        context.update({
            "tenants": {
                "1001": {
                    "name": "t1_name",
                    "containers": [
                        {"user": {
                            "id": "u1", "tenant_id": "1001",
                            "credential": mock.MagicMock()},
                         "container": "c1",
                         "objects": []}
                    ]
                },
                "1002": {
                    "name": "t2_name",
                    "containers": [
                        {"user": {
                            "id": "u2", "tenant_id": "1002",
                            "credential": mock.MagicMock()},
                         "container": "c2",
                         "objects": []}
                    ]
                }
            }
        })
        mixin = SwiftContext(context)

        objects_list = mixin._create_objects(objects_per_container,
                                             1024, 25)

        self.assertEqual(
            tenants * containers_per_tenant * objects_per_container,
            len(objects_list))
        # Objects sort by (tenant, container); each chunk belongs to one
        # tenant/container pair.
        chunk = containers_per_tenant * objects_per_container
        for index, obj in enumerate(sorted(objects_list)):
            offset = int(index / chunk) + 1
            self.assertEqual(str(1000 + offset), obj[0])
            self.assertEqual("c%d" % offset, obj[1])

        for tenant_id in context["tenants"]:
            for container in context["tenants"][tenant_id]["containers"]:
                self.assertEqual(objects_per_container,
                                 len(container["objects"]))

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test__delete_containers(self, mock_clients):
        context = test.get_test_context()
        context.update({
            "tenants": {
                "1001": {
                    "name": "t1_name",
                    "containers": [
                        {"user": {
                            "id": "u1", "tenant_id": "1001",
                            "credential": mock.MagicMock()},
                         "container": "c1",
                         "objects": []}
                    ]
                },
                "1002": {
                    "name": "t2_name",
                    "containers": [
                        {"user": {
                            "id": "u2", "tenant_id": "1002",
                            "credential": mock.MagicMock()},
                         "container": "c2",
                         "objects": []}
                    ]
                }
            }
        })

        SwiftContext(context)._delete_containers(1)

        mock_swift = mock_clients.return_value.swift.return_value
        expected_containers = ["c1", "c2"]
        mock_swift.delete_container.assert_has_calls(
            [mock.call(con) for con in expected_containers],
            any_order=True)

        # Deleting must also drop the bookkeeping entries in the context.
        for tenant_id in context["tenants"]:
            self.assertEqual(
                0, len(context["tenants"][tenant_id]["containers"]))

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test__delete_objects(self, mock_clients):
        context = test.get_test_context()
        context.update({
            "tenants": {
                "1001": {
                    "name": "t1_name",
                    "containers": [
                        {"user": {
                            "id": "u1", "tenant_id": "1001",
                            "credential": mock.MagicMock()},
                         "container": "c1",
                         "objects": ["o1", "o2", "o3"]}
                    ]
                },
                "1002": {
                    "name": "t2_name",
                    "containers": [
                        {"user": {
                            "id": "u2", "tenant_id": "1002",
                            "credential": mock.MagicMock()},
                         "container": "c2",
                         "objects": ["o4", "o5", "o6"]}
                    ]
                }
            }
        })

        SwiftContext(context)._delete_objects(1)

        mock_swift = mock_clients.return_value.swift.return_value
        expected_objects = [("c1", "o1"), ("c1", "o2"), ("c1", "o3"),
                            ("c2", "o4"), ("c2", "o5"), ("c2", "o6")]
        mock_swift.delete_object.assert_has_calls(
            [mock.call(con, obj) for con, obj in expected_objects],
            any_order=True)

        # Object lists are emptied in place; containers themselves remain.
        for tenant_id in context["tenants"]:
            for container in context["tenants"][tenant_id]["containers"]:
                self.assertEqual(0, len(container["objects"]))
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,792
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/cinder/test_utils.py
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.common import credential
from rally_openstack.task.scenarios.cinder import utils
from tests.unit import test


class CinderBasicTestCase(test.ScenarioTestCase):

    def _get_context(self):
        """Build a scenario context with one admin/user pair and a tenant

        that owns a single fake volume and a single fake server.
        """
        fake_cred = credential.OpenStackCredential(auth_url="url",
                                                   username="user",
                                                   password="pass")
        fake_tenant = {
            "id": "fake",
            "name": "fake",
            "volumes": [{"id": "uuid", "size": 1}],
            "servers": [1],
        }
        ctx = test.get_test_context()
        ctx.update({
            "admin": {"id": "fake_user_id", "credential": fake_cred},
            "user": {"id": "fake_user_id", "credential": fake_cred},
            "tenant": fake_tenant,
        })
        return ctx

    def setUp(self):
        super(CinderBasicTestCase, self).setUp()

    @mock.patch("random.choice")
    def test_get_random_server(self, mock_choice):
        scenario = utils.CinderBasic(self._get_context())
        # random.choice is patched, so invoking it here yields the very
        # object the scenario will pick internally.
        chosen_id = mock_choice(scenario.context["tenant"]["servers"])

        fetched_server = scenario.get_random_server()

        nova_get = scenario.clients("nova").servers.get
        nova_get.assert_called_once_with(chosen_id)
        self.assertEqual(nova_get.return_value, fetched_server)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,793
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/octavia/pools.py
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.octavia import utils


"""Scenarios for Octavia Loadbalancer pools."""


def _create_loadbalancers(scenario_inst):
    """Create one loadbalancer per subnet of the tenant's networks.

    All four pool scenarios below previously duplicated this
    subnet-collection / loadbalancer-creation loop verbatim; it is
    factored out here so the scenarios differ only in what they do
    with each pool.

    :param scenario_inst: an OctaviaBase scenario instance
    :returns: list of created loadbalancer dicts; callers must still
              wait for each loadbalancer's provisioning status
    """
    subnets = []
    loadbalancers = []
    networks = scenario_inst.context.get("tenant", {}).get("networks", [])
    project_id = scenario_inst.context["tenant"]["id"]
    for network in networks:
        subnets.extend(network.get("subnets", []))
    for subnet_id in subnets:
        lb = scenario_inst.octavia.load_balancer_create(
            project_id=project_id,
            subnet_id=subnet_id)
        loadbalancers.append(lb)
    return loadbalancers


@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
                    name="Octavia.create_and_list_pools",
                    platform="openstack")
class CreateAndListPools(utils.OctaviaBase):

    def run(self, protocol, lb_algorithm):
        """Create a loadbalancer pool per each subnet and then list pools.

        :param protocol: protocol for which the pool listens
        :param lb_algorithm: loadbalancer algorithm
        """
        for loadbalancer in _create_loadbalancers(self):
            self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
            self.octavia.pool_create(
                lb_id=loadbalancer["id"],
                protocol=protocol, lb_algorithm=lb_algorithm)
        self.octavia.pool_list()


@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
                    name="Octavia.create_and_delete_pools",
                    platform="openstack")
class CreateAndDeletePools(utils.OctaviaBase):

    def run(self, protocol, lb_algorithm):
        """Create a pool per each subnet and then delete pool

        :param protocol: protocol for which the pool listens
        :param lb_algorithm: loadbalancer algorithm
        """
        for loadbalancer in _create_loadbalancers(self):
            self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
            pools = self.octavia.pool_create(
                lb_id=loadbalancer["id"],
                protocol=protocol, lb_algorithm=lb_algorithm)
            self.octavia.pool_delete(pools["id"])


@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
                    name="Octavia.create_and_update_pools",
                    platform="openstack")
class CreateAndUpdatePools(utils.OctaviaBase):

    def run(self, protocol, lb_algorithm):
        """Create a pool per each subnet and then update

        :param protocol: protocol for which the pool listens
        :param lb_algorithm: loadbalancer algorithm
        """
        # One random name is generated up front and reused for every
        # pool update, matching the original behavior.
        update_pool = {
            "name": self.generate_random_name()
        }
        for loadbalancer in _create_loadbalancers(self):
            self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
            pools = self.octavia.pool_create(
                lb_id=loadbalancer["id"],
                protocol=protocol, lb_algorithm=lb_algorithm)
            self.octavia.pool_set(
                pool_id=pools["id"], pool_update_args=update_pool)


@validation.add("required_services", services=[consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia"]},
                    name="Octavia.create_and_show_pools",
                    platform="openstack")
class CreateAndShowPools(utils.OctaviaBase):

    def run(self, protocol, lb_algorithm):
        """Create a pool per each subnet and show it

        :param protocol: protocol for which the pool listens
        :param lb_algorithm: loadbalancer algorithm
        """
        for loadbalancer in _create_loadbalancers(self):
            self.octavia.wait_for_loadbalancer_prov_status(loadbalancer)
            pools = self.octavia.pool_create(
                lb_id=loadbalancer["id"],
                protocol=protocol, lb_algorithm=lb_algorithm)
            self.octavia.pool_show(pools["id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,794
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/heat/utils.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import utils
import requests

from rally_openstack.task import scenario


LOG = logging.getLogger(__name__)

CONF = cfg.CONF


class HeatScenario(scenario.OpenStackScenario):
    """Base class for Heat scenarios with basic atomic actions.

    Each private method wraps one Heat API operation in an atomic action
    timer and, where the operation is asynchronous, polls the stack until
    it reaches the expected terminal status (timeouts and poll intervals
    come from the ``openstack`` config group).
    """

    @atomic.action_timer("heat.list_stacks")
    def _list_stacks(self):
        """Return user stack list."""

        return list(self.clients("heat").stacks.list())

    @atomic.action_timer("heat.create_stack")
    def _create_stack(self, template, parameters=None,
                      files=None, environment=None):
        """Create a new stack.

        :param template: template with stack description.
        :param parameters: template parameters used during stack creation
        :param files: additional files used in template
        :param environment: stack environment definition

        :returns: object of stack
        """
        stack_name = self.generate_random_name()
        kw = {
            "stack_name": stack_name,
            "disable_rollback": True,
            "parameters": parameters or {},
            "template": template,
            "files": files or {},
            "environment": environment or {}
        }

        # heat client returns body instead manager object, so we should
        # get manager object using stack_id
        stack_id = self.clients("heat").stacks.create(**kw)["stack"]["id"]
        stack = self.clients("heat").stacks.get(stack_id)

        # Give Heat a head start before the first status poll.
        self.sleep_between(CONF.openstack.heat_stack_create_prepoll_delay)

        stack = utils.wait_for_status(
            stack,
            ready_statuses=["CREATE_COMPLETE"],
            failure_statuses=["CREATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_create_timeout,
            check_interval=CONF.openstack.heat_stack_create_poll_interval)

        return stack

    @atomic.action_timer("heat.update_stack")
    def _update_stack(self, stack, template, parameters=None,
                      files=None, environment=None):
        """Update an existing stack

        :param stack: stack that need to be updated
        :param template: Updated template
        :param parameters: template parameters for stack update
        :param files: additional files used in template
        :param environment: stack environment definition

        :returns: object of updated stack
        """
        kw = {
            "stack_name": stack.stack_name,
            "disable_rollback": True,
            "parameters": parameters or {},
            "template": template,
            "files": files or {},
            "environment": environment or {}
        }
        self.clients("heat").stacks.update(stack.id, **kw)

        self.sleep_between(CONF.openstack.heat_stack_update_prepoll_delay)

        stack = utils.wait_for_status(
            stack,
            ready_statuses=["UPDATE_COMPLETE"],
            failure_statuses=["UPDATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_update_timeout,
            check_interval=CONF.openstack.heat_stack_update_poll_interval)
        return stack

    @atomic.action_timer("heat.check_stack")
    def _check_stack(self, stack):
        """Check given stack.

        Check the stack and stack resources.

        :param stack: stack that needs to be checked
        """
        self.clients("heat").actions.check(stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["CHECK_COMPLETE"],
            failure_statuses=["CHECK_FAILED", "ERROR"],
            # CHECK_FAILED is also treated as a deletion-like state by the
            # resource updater, hence it is passed to get_from_manager too.
            update_resource=utils.get_from_manager(["CHECK_FAILED"]),
            timeout=CONF.openstack.heat_stack_check_timeout,
            check_interval=CONF.openstack.heat_stack_check_poll_interval)

    @atomic.action_timer("heat.delete_stack")
    def _delete_stack(self, stack):
        """Delete given stack.

        Returns when the stack is actually deleted.

        :param stack: stack object
        """
        stack.delete()
        utils.wait_for_status(
            stack,
            ready_statuses=["DELETE_COMPLETE"],
            failure_statuses=["DELETE_FAILED", "ERROR"],
            check_deletion=True,
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_delete_timeout,
            check_interval=CONF.openstack.heat_stack_delete_poll_interval)

    @atomic.action_timer("heat.suspend_stack")
    def _suspend_stack(self, stack):
        """Suspend given stack.

        :param stack: stack that needs to be suspended
        """
        self.clients("heat").actions.suspend(stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["SUSPEND_COMPLETE"],
            failure_statuses=["SUSPEND_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_suspend_timeout,
            check_interval=CONF.openstack.heat_stack_suspend_poll_interval)

    @atomic.action_timer("heat.resume_stack")
    def _resume_stack(self, stack):
        """Resume given stack.

        :param stack: stack that needs to be resumed
        """
        self.clients("heat").actions.resume(stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["RESUME_COMPLETE"],
            failure_statuses=["RESUME_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_resume_timeout,
            check_interval=CONF.openstack.heat_stack_resume_poll_interval)

    @atomic.action_timer("heat.snapshot_stack")
    def _snapshot_stack(self, stack):
        """Creates a snapshot for given stack.

        :param stack: stack that will be used as base for snapshot
        :returns: snapshot created for given stack
        """
        snapshot = self.clients("heat").stacks.snapshot(
            stack.id)
        utils.wait_for_status(
            stack,
            ready_statuses=["SNAPSHOT_COMPLETE"],
            failure_statuses=["SNAPSHOT_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_snapshot_timeout,
            check_interval=CONF.openstack.heat_stack_snapshot_poll_interval)
        return snapshot

    @atomic.action_timer("heat.restore_stack")
    def _restore_stack(self, stack, snapshot_id):
        """Restores stack from given snapshot.

        :param stack: stack that will be restored from snapshot
        :param snapshot_id: id of given snapshot
        """
        self.clients("heat").stacks.restore(stack.id, snapshot_id)
        utils.wait_for_status(
            stack,
            ready_statuses=["RESTORE_COMPLETE"],
            failure_statuses=["RESTORE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.heat_stack_restore_timeout,
            check_interval=CONF.openstack.heat_stack_restore_poll_interval
        )

    @atomic.action_timer("heat.show_output")
    def _stack_show_output(self, stack, output_key):
        """Execute output_show for specified "output_key".

        This method uses new output API call.

        :param stack: stack with output_key output.
        :param output_key: The name of the output.
        """
        output = self.clients("heat").stacks.output_show(stack.id,
                                                         output_key)
        return output

    @atomic.action_timer("heat.show_output_via_API")
    def _stack_show_output_via_API(self, stack, output_key):
        """Execute output_show for specified "output_key".

        This method uses old way for getting output value.
        It gets whole stack object and then finds necessary "output_key".

        :param stack: stack with output_key output.
        :param output_key: The name of the output.
        """
        # this code copy-pasted and adopted for rally from old client version
        # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/
        # v1/shell.py#L682-L699
        stack = self.clients("heat").stacks.get(stack_id=stack.id)
        for output in stack.to_dict().get("outputs", []):
            if output["output_key"] == output_key:
                return output
        # NOTE(review): implicitly returns None when the key is absent.

    @atomic.action_timer("heat.list_output")
    def _stack_list_output(self, stack):
        """Execute output_list for specified "stack".

        This method uses new output API call.

        :param stack: stack to call output-list.
        """
        output_list = self.clients("heat").stacks.output_list(stack.id)
        return output_list

    @atomic.action_timer("heat.list_output_via_API")
    def _stack_list_output_via_API(self, stack):
        """Execute output_list for specified "stack".

        This method uses old way for getting output value.
        It gets whole stack object and then prints all outputs
        belongs this stack.

        :param stack: stack to call output-list.
        """
        # this code copy-pasted and adopted for rally from old client version
        # https://github.com/openstack/python-heatclient/blob/0.8.0/heatclient/
        # v1/shell.py#L649-L663
        stack = self.clients("heat").stacks.get(stack_id=stack.id)
        output_list = stack.to_dict()["outputs"]
        return output_list

    def _count_instances(self, stack):
        """Count instances in a Heat stack.

        :param stack: stack to count instances in.
        """
        return len([
            r for r in self.clients("heat").resources.list(
                stack.id,
                nested_depth=1)
            if r.resource_type == "OS::Nova::Server"])

    def _scale_stack(self, stack, output_key, delta):
        """Scale a stack up or down.

        Calls the webhook given in the output value identified by
        'output_key', and waits for the stack size to change by
        'delta'.

        :param stack: stack to scale up or down
        :param output_key: The name of the output to get the URL from
        :param delta: The expected change in number of instances in
                      the stack (signed int)
        """
        num_instances = self._count_instances(stack)
        expected_instances = num_instances + delta
        LOG.debug("Scaling stack %s from %s to %s instances with %s" %
                  (stack.id, num_instances, expected_instances, output_key))
        with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
            self._stack_webhook(stack, output_key)
            # Readiness here is defined by instance count, not stack status:
            # wait until the scaling group actually reaches the target size.
            utils.wait_for(
                stack,
                is_ready=lambda s: (
                    self._count_instances(s) == expected_instances),
                failure_statuses=["UPDATE_FAILED", "ERROR"],
                update_resource=utils.get_from_manager(),
                timeout=CONF.openstack.heat_stack_scale_timeout,
                check_interval=CONF.openstack.heat_stack_scale_poll_interval)

    def _stack_webhook(self, stack, output_key):
        """POST to the URL given in the output value identified by output_key.

        This can be used to scale stacks up and down, for instance.

        :param stack: stack to call a webhook on
        :param output_key: The name of the output to get the URL from
        :raises InvalidConfigException: if the output key is not found
        """
        url = None
        for output in stack.outputs:
            if output["output_key"] == output_key:
                url = output["output_value"]
                break
        else:
            # for/else: only reached when no matching output key was found.
            raise exceptions.InvalidConfigException(
                "No output key %(key)s found in stack %(id)s" %
                {"key": output_key, "id": stack.id})

        # TLS verification mirrors the platform spec: verify against the
        # configured CA bundle unless https_insecure is set.
        platform_params = self.context["env"]["spec"]["existing@openstack"]
        verify = (platform_params.get("https_cacert")
                  if not platform_params.get("https_insecure")
                  else False)
        with atomic.ActionTimer(self, "heat.%s_webhook" % output_key):
            requests.post(url, verify=verify).raise_for_status()
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,795
openstack/rally-openstack
refs/heads/master
/tests/unit/task/test_context.py
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally_openstack.task import context
from tests.unit import test


class TenantIteratorTestCase(test.TestCase):
    """Tests for OpenStackContext._iterate_per_tenants."""

    def test__iterate_per_tenant(self):
        class DummyContext(context.OpenStackContext):
            def __init__(self, ctx):
                self.context = ctx

            def setup(self):
                pass

            def cleanup(self):
                pass

        tenants_count = 2
        users_per_tenant = 5
        users = [
            {"id": str(user_idx), "tenant_id": str(tenant_idx)}
            for tenant_idx in range(tenants_count)
            for user_idx in range(users_per_tenant)
        ]

        # One (user, tenant_id) pair is expected per tenant, built from
        # the first user ("0") of each tenant.
        expected_result = [
            ({"id": "0", "tenant_id": str(t)}, str(t))
            for t in range(tenants_count)
        ]
        real_result = list(
            DummyContext({"users": users})._iterate_per_tenants())
        self.assertEqual(expected_result, real_result)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,796
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/grafana/metrics.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import cfg
from rally.common import logging
from rally.task import types
from rally.task import utils
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.common.services.grafana import grafana as grafana_service
from rally_openstack.task import scenario

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


"""Scenarios for Pushgateway and Grafana metrics."""


def _make_grafana_service(scenario_inst, monitor_vip, pushgateway_port,
                          grafana, datasource_id, job_name):
    """Build a GrafanaService bound to the given scenario instance.

    Both scenarios below constructed the service with an identical
    expression; this helper removes that duplication.

    :param scenario_inst: scenario instance providing name generation and
                          atomic actions
    :param monitor_vip: monitoring system IP
    :param pushgateway_port: Pushgateway port
    :param grafana: Grafana dict with creds and port
    :param datasource_id: metrics storage datasource ID in Grafana
    :param job_name: job name to push metric in it
    :returns: configured GrafanaService instance
    """
    return grafana_service.GrafanaService(
        dict(monitor_vip=monitor_vip,
             pushgateway_port=pushgateway_port,
             grafana=grafana,
             datasource_id=datasource_id,
             job_name=job_name),
        name_generator=scenario_inst.generate_random_name,
        atomic_inst=scenario_inst.atomic_actions())


@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="GrafanaMetrics.push_metric_from_instance",
                    platform="openstack")
class PushMetricsInstance(scenario.OpenStackScenario):
    """Test monitoring system by pushing metric from nova server and check it.

    Scenario tests monitoring system, which uses Pushgateway as metric
    exporter and Grafana as metrics monitoring.

    The goal of the test is to check that monitoring system works correctly
    with nova instance. Test case is the following: we deploy some env with
    nodes on Openstack nova instances, add metric exporter (using Pushgateway
    in this test) inside nodes (i.e. nova instances) for some interested
    metrics (e.g. CPU, memory etc.). We want to check that metrics
    successfully sends to metrics storage (e.g. Prometheus) by requesting
    Grafana.

    Create nova instance, add Pushgateway push random metric to userdata and
    after instance would be available, check Grafana datasource that pushed
    metric in data.
    """

    def _metric_from_instance(self, seed, image, flavor, monitor_vip,
                              pushgateway_port, job_name):
        """Boot a server whose userdata pushes one metric to Pushgateway.

        :param seed: unique metric/server name used as both the server name
                     and the pushed metric label
        :param image: image for the server
        :param flavor: flavor for the server
        :param monitor_vip: monitoring system IP to push metric
        :param pushgateway_port: Pushgateway port
        :param job_name: Pushgateway job name to push metric in it
        """
        push_cmd = (
            "echo %(seed)s 12345 | curl --data-binary "
            "@- http://%(monitor_vip)s:%(pgtw_port)s/metrics/job"
            "/%(job_name)s" % {"seed": seed,
                               "monitor_vip": monitor_vip,
                               "pgtw_port": pushgateway_port,
                               "job_name": job_name})
        userdata = ("#!/bin/bash\n%s" % push_cmd)
        server = self.clients("nova").servers.create(seed, image, flavor,
                                                     userdata=userdata)
        LOG.info("Server %s create started" % seed)
        self.sleep_between(CONF.openstack.nova_server_boot_prepoll_delay)
        utils.wait_for_status(
            server,
            ready_statuses=["ACTIVE"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.nova_server_boot_timeout,
            check_interval=CONF.openstack.nova_server_boot_poll_interval
        )
        LOG.info("Server %s with pushing metric script (metric exporter) is "
                 "active" % seed)

    def run(self, image, flavor, monitor_vip, pushgateway_port, grafana,
            datasource_id, job_name, sleep_time=5, retries_total=30):
        """Create nova instance with pushing metric script as userdata.

        Push metric to metrics storage using Pushgateway and check it in
        Grafana.

        :param image: image for server with userdata script
        :param flavor: flavor for server with userdata script
        :param monitor_vip: monitoring system IP to push metric
        :param pushgateway_port: Pushgateway port to use for pushing metric
        :param grafana: Grafana dict with creds and port to use for checking
                        metric. Format: {user: admin, password: pass,
                        port: 9902}
        :param datasource_id: metrics storage datasource ID in Grafana
        :param job_name: job name to push metric in it
        :param sleep_time: sleep time between checking metrics in seconds
        :param retries_total: total number of retries to check metric in
                              Grafana
        """
        seed = self.generate_random_name()

        grafana_svc = _make_grafana_service(
            self, monitor_vip, pushgateway_port, grafana, datasource_id,
            job_name)

        self._metric_from_instance(seed, image, flavor, monitor_vip,
                                   pushgateway_port, job_name)
        checked = grafana_svc.check_metric(seed, sleep_time=sleep_time,
                                           retries_total=retries_total)
        self.assertTrue(checked)


@scenario.configure(name="GrafanaMetrics.push_metric_locally")
class PushMetricLocal(scenario.OpenStackScenario):
    """Test monitoring system availability with local pushing random metric.
    """

    def run(self, monitor_vip, pushgateway_port, grafana, datasource_id,
            job_name, sleep_time=5, retries_total=30):
        """Push random metric to Pushgateway locally and check it in Grafana.

        :param monitor_vip: monitoring system IP to push metric
        :param pushgateway_port: Pushgateway port to use for pushing metric
        :param grafana: Grafana dict with creds and port to use for checking
                        metric. Format: {user: admin, password: pass,
                        port: 9902}
        :param datasource_id: metrics storage datasource ID in Grafana
        :param job_name: job name to push metric in it
        :param sleep_time: sleep time between checking metrics in seconds
        :param retries_total: total number of retries to check metric in
                              Grafana
        """
        seed = self.generate_random_name()

        grafana_svc = _make_grafana_service(
            self, monitor_vip, pushgateway_port, grafana, datasource_id,
            job_name)

        pushed = grafana_svc.push_metric(seed)
        self.assertTrue(pushed)
        checked = grafana_svc.check_metric(seed, sleep_time=sleep_time,
                                           retries_total=retries_total)
        self.assertTrue(checked)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,797
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/magnum/clusters.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.magnum import utils
from rally_openstack.task.scenarios.nova import utils as nova_utils


"""Scenarios for Magnum clusters."""


@validation.add("required_services", services=[consts.Service.MAGNUM])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["magnum.clusters"]},
                    name="MagnumClusters.list_clusters",
                    platform="openstack")
class ListClusters(utils.MagnumScenario):

    def run(self, **kwargs):
        """List all clusters.

        Measure the "magnum clusters-list" command performance.

        :param limit: (Optional) The maximum number of results to return
                      per request, if:

            1) limit > 0, the maximum number of clusters to return.
            2) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Magnum API
               (see Magnum's api.max_limit option).
        :param kwargs: optional additional arguments for clusters listing
        """
        self._list_clusters(**kwargs)


@validation.add("required_services", services=[consts.Service.MAGNUM])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["magnum.clusters", "nova.keypairs"]},
    name="MagnumClusters.create_and_list_clusters", platform="openstack")
class CreateAndListClusters(utils.MagnumScenario, nova_utils.NovaScenario):

    def run(self, node_count, **kwargs):
        """create cluster and then list all clusters.

        :param node_count: the cluster node count.
        :param cluster_template_uuid: optional, if user want to use an
                                      existing cluster_template
        :param kwargs: optional additional arguments for cluster creation
        """
        # Prefer an explicitly supplied template UUID; otherwise use the
        # one prepared in the tenant context. Either way the key must not
        # leak into the remaining creation kwargs.
        template_uuid = kwargs.pop("cluster_template_uuid", None)
        if template_uuid is None:
            template_uuid = self.context["tenant"]["cluster_template"]

        keypair = self._create_keypair()

        created_cluster = self._create_cluster(
            template_uuid, node_count, keypair=keypair, **kwargs)
        self.assertTrue(created_cluster, "Failed to create new cluster")

        listed_uuids = [c.uuid for c in self._list_clusters(**kwargs)]
        self.assertIn(created_cluster.uuid, listed_uuids,
                      "New cluster not found in a list of clusters")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,798
openstack/rally-openstack
refs/heads/master
/tests/unit/task/test_types.py
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally import exceptions from rally_openstack.task import types from tests.unit import fakes from tests.unit import test class OpenStackResourceTypeTestCase(test.TestCase): def test__find_resource(self): @types.configure(name=self.id()) class FooType(types.OpenStackResourceType): def pre_process(self, resource_spec, config): pass ftype = FooType({}) resources = dict( (name, fakes.FakeResource(name=name)) for name in ["Fake1", "Fake2", "Fake3"]) # case #1: 100% name match self.assertEqual( resources["Fake2"], ftype._find_resource({"name": "Fake2"}, resources.values())) # case #2: pick the latest one self.assertEqual( resources["Fake3"], ftype._find_resource({"name": "Fake"}, resources.values())) # case #3: regex one match self.assertEqual( resources["Fake2"], ftype._find_resource({"regex": ".ake2"}, resources.values())) # case #4: regex, pick the latest one self.assertEqual( resources["Fake3"], ftype._find_resource({"regex": "Fake"}, resources.values())) def test__find_resource_negative(self): @types.configure(name=self.id()) class FooType(types.OpenStackResourceType): def pre_process(self, resource_spec, config): pass ftype = FooType({}) # case #1: the wrong resource spec e = self.assertRaises(exceptions.InvalidScenarioArgument, ftype._find_resource, {}, []) self.assertIn("'id', 'name', or 'regex' not found", e.format_message()) 
# case #2: two matches for one name resources = [fakes.FakeResource(name="Fake1"), fakes.FakeResource(name="Fake2"), fakes.FakeResource(name="Fake1")] e = self.assertRaises( exceptions.InvalidScenarioArgument, ftype._find_resource, {"name": "Fake1"}, resources) self.assertIn("with name 'Fake1' is ambiguous, possible matches", e.format_message()) # case #3: no matches at all resources = [fakes.FakeResource(name="Fake1"), fakes.FakeResource(name="Fake2"), fakes.FakeResource(name="Fake3")] e = self.assertRaises( exceptions.InvalidScenarioArgument, ftype._find_resource, {"name": "Foo"}, resources) self.assertIn("with pattern 'Foo' not found", e.format_message()) # case #4: two matches for one name, but 'accurate' is True resources = [fakes.FakeResource(name="Fake1"), fakes.FakeResource(name="Fake2"), fakes.FakeResource(name="Fake3")] e = self.assertRaises( exceptions.InvalidScenarioArgument, ftype._find_resource, {"name": "Fake", "accurate": True}, resources) self.assertIn("with name 'Fake' not found", e.format_message()) # case #5: two matches for one name, but 'accurate' is True resources = [fakes.FakeResource(name="Fake1"), fakes.FakeResource(name="Fake2"), fakes.FakeResource(name="Fake3")] e = self.assertRaises( exceptions.InvalidScenarioArgument, ftype._find_resource, {"regex": "Fake", "accurate": True}, resources) self.assertIn("with name 'Fake' is ambiguous, possible matches", e.format_message()) class FlavorTestCase(test.TestCase): def setUp(self): super(FlavorTestCase, self).setUp() self.clients = fakes.FakeClients() self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.tiny", id="1")) self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.nano", id="42")) self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large", id="44")) self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large", id="45")) self.type_cls = types.Flavor( context={"admin": {"credential": mock.Mock()}}) self.type_cls._clients = self.clients def 
test_preprocess_by_id(self): resource_spec = {"id": "42"} flavor_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("42", flavor_id) def test_preprocess_by_name(self): resource_spec = {"name": "m1.nano"} flavor_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("42", flavor_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "m1.medium"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_name_multiple_match(self): resource_spec = {"name": "m1.large"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex(self): resource_spec = {"regex": r"m(1|2)\.nano"} flavor_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("42", flavor_id) def test_preprocess_by_regex_multiple_match(self): resource_spec = {"regex": "^m1"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex_no_match(self): resource_spec = {} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) class GlanceImageTestCase(test.TestCase): def setUp(self): super(GlanceImageTestCase, self).setUp() self.clients = fakes.FakeClients() image1 = fakes.FakeResource(name="cirros-0.5.2-uec", id="100") self.clients.glance().images._cache(image1) image2 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk", id="101") self.clients.glance().images._cache(image2) image3 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="102") self.clients.glance().images._cache(image3) image4 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="103") self.clients.glance().images._cache(image4) self.type_cls = types.GlanceImage( context={"admin": 
{"credential": mock.Mock()}}) self.type_cls._clients = self.clients def test_preprocess_by_id(self): resource_spec = {"id": "100"} image_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("100", image_id) def test_preprocess_by_name(self): resource_spec = {"name": "^cirros-0.5.2-uec$"} image_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("100", image_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "cirros-0.5.2-uec-boot"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_name_match_multiple(self): resource_spec = {"name": "cirros-0.5.2-uec-ramdisk-copy"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex(self): resource_spec = {"regex": "-uec$"} image_id = self.type_cls.pre_process( resource_spec=resource_spec, config={}) self.assertEqual("100", image_id) def test_preprocess_by_regex_match_multiple(self): resource_spec = {"regex": "^cirros"} image_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) # matching resources are sorted by the names. 
It is impossible to # predict which resource will be luckiest self.assertIn(image_id, ["102", "103"]) def test_preprocess_by_regex_no_match(self): resource_spec = {"regex": "-boot$"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) class GlanceImageArgsTestCase(test.TestCase): def test_preprocess(self): self.assertEqual( {}, types.GlanceImageArguments({}).pre_process( resource_spec={}, config={})) self.assertEqual( {"visibility": "public"}, types.GlanceImageArguments({}).pre_process( config={}, resource_spec={"visibility": "public"})) self.assertEqual( {"visibility": "public"}, types.GlanceImageArguments({}).pre_process( config={}, resource_spec={"visibility": "public", "is_public": False})) self.assertEqual( {"visibility": "private"}, types.GlanceImageArguments({}).pre_process( config={}, resource_spec={"is_public": False})) class EC2ImageTestCase(test.TestCase): def setUp(self): super(EC2ImageTestCase, self).setUp() self.clients = fakes.FakeClients() image1 = fakes.FakeResource(name="cirros-0.5.2-uec", id="100") self.clients.glance().images._cache(image1) image2 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk", id="102") self.clients.glance().images._cache(image2) image3 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="102") self.clients.glance().images._cache(image3) image4 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="103") self.clients.glance().images._cache(image4) ec2_image1 = fakes.FakeResource(name="cirros-0.5.2-uec", id="200") ec2_image2 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk", id="201") ec2_image3 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="202") ec2_image4 = fakes.FakeResource(name="cirros-0.5.2-uec-ramdisk-copy", id="203") self.clients.ec2().get_all_images = mock.Mock( return_value=[ec2_image1, ec2_image2, ec2_image3, ec2_image4]) self.type_cls = types.EC2Image( context={"admin": {"credential": mock.Mock()}}) 
self.type_cls._clients = self.clients def test_preprocess_by_name(self): resource_spec = {"name": "^cirros-0.5.2-uec$"} ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual("200", ec2_image_id) def test_preprocess_by_id(self): resource_spec = {"id": "100"} ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual("200", ec2_image_id) def test_preprocess_by_id_no_match(self): resource_spec = {"id": "101"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "cirros-0.5.2-uec-boot"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_name_match_multiple(self): resource_spec = {"name": "cirros-0.5.2-uec-ramdisk-copy"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex(self): resource_spec = {"regex": "-uec$"} ec2_image_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual("200", ec2_image_id) def test_preprocess_by_regex_match_multiple(self): resource_spec = {"regex": "^cirros"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex_no_match(self): resource_spec = {"regex": "-boot$"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) class VolumeTypeTestCase(test.TestCase): def setUp(self): super(VolumeTypeTestCase, self).setUp() cinder = mock.patch("rally_openstack.task.types.block.BlockStorage") self.service = cinder.start().return_value self.addCleanup(cinder.stop) volume_type1 = fakes.FakeResource(name="lvmdriver-1", id=100) self.type_cls = types.VolumeType( 
context={"admin": {"credential": mock.Mock()}}) self.service.list_types.return_value = [volume_type1] def test_preprocess_by_id(self): resource_spec = {"id": 100} volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(100, volumetype_id) def test_preprocess_by_name(self): resource_spec = {"name": "lvmdriver-1"} volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(100, volumetype_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "nomatch-1"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) def test_preprocess_by_regex(self): resource_spec = {"regex": "^lvm.*-1"} volumetype_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(100, volumetype_id) def test_preprocess_by_regex_no_match(self): resource_spec = {"regex": "dd"} self.assertRaises(exceptions.InvalidScenarioArgument, self.type_cls.pre_process, resource_spec=resource_spec, config={}) class NeutronNetworkTestCase(test.TestCase): def setUp(self): super(NeutronNetworkTestCase, self).setUp() self.clients = fakes.FakeClients() net1_data = {"network": { "name": "net1" }} network1 = self.clients.neutron().create_network(net1_data) self.net1_id = network1["network"]["id"] self.type_cls = types.NeutronNetwork( context={"admin": {"credential": mock.Mock()}}) self.type_cls._clients = self.clients def test_preprocess_by_id(self): resource_spec = {"id": self.net1_id} network_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(network_id, self.net1_id) def test_preprocess_by_name(self): resource_spec = {"name": "net1"} network_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(network_id, self.net1_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "nomatch-1"} self.assertRaises(exceptions.InvalidScenarioArgument, 
self.type_cls.pre_process, resource_spec=resource_spec, config={}) class WatcherStrategyTestCase(test.TestCase): def setUp(self): super(WatcherStrategyTestCase, self).setUp() self.clients = fakes.FakeClients() self.strategy = self.clients.watcher().strategy._cache( fakes.FakeResource(name="dummy", id="1")) self.type_cls = types.WatcherStrategy( context={"admin": {"credential": mock.Mock()}}) self.type_cls._clients = self.clients def test_preprocess_by_name(self): resource_spec = {"name": "dummy"} strategy_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(self.strategy.uuid, strategy_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "dummy-1"} self.assertRaises(exceptions.RallyException, self.type_cls.pre_process, resource_spec=resource_spec, config={}) class WatcherGoalTestCase(test.TestCase): def setUp(self): super(WatcherGoalTestCase, self).setUp() self.clients = fakes.FakeClients() self.goal = self.clients.watcher().goal._cache( fakes.FakeResource(name="dummy", id="1")) self.type_cls = types.WatcherGoal( context={"admin": {"credential": mock.Mock()}}) self.type_cls._clients = self.clients def test_preprocess_by_name(self): resource_spec = {"name": "dummy"} goal_id = self.type_cls.pre_process(resource_spec=resource_spec, config={}) self.assertEqual(self.goal.uuid, goal_id) def test_preprocess_by_name_no_match(self): resource_spec = {"name": "dummy-1"} self.assertRaises(exceptions.RallyException, self.type_cls.pre_process, resource_spec=resource_spec, config={})
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,799
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/barbican/test_utils.py
# Copyright 2018 Red Hat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.task.scenarios.barbican import utils from tests.unit import test class BarbicanBaseTestCase(test.ScenarioTestCase): def setUp(self): super(BarbicanBaseTestCase, self).setUp() self.context = super(BarbicanBaseTestCase, self).get_test_context() self.context.update({ "admin": { "id": "fake_user_id", "credential": mock.MagicMock() }, "user": { "id": "fake_user_id", "credential": mock.MagicMock() }, "tenant": {"id": "fake_tenant_id", "name": "fake_tenant_name"} }) m = "rally_openstack.common.services.key_manager.barbican" patch = mock.patch("%s.BarbicanService" % m) self.addCleanup(patch.stop) self.mock_service = patch.start() def test_barbican_base(self): base = utils.BarbicanBase(self.context) self.assertEqual(base.admin_barbican, self.mock_service.return_value)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,800
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/vm/utils.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import io import os.path import subprocess import sys import netaddr from rally.common import cfg from rally.common import logging from rally.task import atomic from rally.task import utils from rally.utils import sshutils from rally_openstack.task.scenarios.nova import utils as nova_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF class Host(object): ICMP_UP_STATUS = "ICMP UP" ICMP_DOWN_STATUS = "ICMP DOWN" name = "ip" def __init__(self, ip): self.ip = netaddr.IPAddress(ip) self.status = self.ICMP_DOWN_STATUS @property def id(self): return self.ip.format() @classmethod def update_status(cls, server): """Check ip address is pingable and update status.""" ping = "ping" if server.ip.version == 4 else "ping6" if sys.platform.startswith("linux"): cmd = [ping, "-c1", "-w1", server.ip.format()] else: cmd = [ping, "-c1", server.ip.format()] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc.wait() LOG.debug("Host %s is ICMP %s" % (server.ip.format(), proc.returncode and "down" or "up")) if proc.returncode == 0: server.status = cls.ICMP_UP_STATUS else: server.status = cls.ICMP_DOWN_STATUS return server def __eq__(self, other): if not isinstance(other, Host): raise TypeError("%s should be an instance of %s" % ( other, Host.__class__.__name__)) return self.ip == other.ip and self.status == other.status def __ne__(self, other): return not 
self.__eq__(other) class VMScenario(nova_utils.NovaScenario): """Base class for VM scenarios with basic atomic actions. VM scenarios are scenarios executed inside some launched VM instance. """ USER_RWX_OTHERS_RX_ACCESS_MODE = 0o755 RESOURCE_NAME_PREFIX = "rally_vm_" @atomic.action_timer("vm.run_command_over_ssh") def _run_command_over_ssh(self, ssh, command): """Run command inside an instance. This is a separate function so that only script execution is timed. :param ssh: A SSHClient instance. :param command: Dictionary specifying command to execute. See `rally info find VMTasks.boot_runcommand_delete' parameter `command' docstring for explanation. :returns: tuple (exit_status, stdout, stderr) """ cmd, stdin = [], None interpreter = command.get("interpreter") or [] if interpreter: if isinstance(interpreter, str): interpreter = [interpreter] elif type(interpreter) != list: raise ValueError("command 'interpreter' value must be str " "or list type") cmd.extend(interpreter) remote_path = command.get("remote_path") or [] if remote_path: if isinstance(remote_path, str): remote_path = [remote_path] elif type(remote_path) != list: raise ValueError("command 'remote_path' value must be str " "or list type") cmd.extend(remote_path) if command.get("local_path"): ssh.put_file(os.path.expanduser( command["local_path"]), remote_path[-1], mode=self.USER_RWX_OTHERS_RX_ACCESS_MODE) if command.get("script_file"): stdin = open(os.path.expanduser(command["script_file"]), "rb") elif command.get("script_inline"): stdin = io.StringIO(command["script_inline"]) cmd.extend(command.get("command_args") or []) return ssh.execute(cmd, stdin=stdin) def _boot_server_with_fip(self, image, flavor, use_floating_ip=True, floating_network=None, **kwargs): """Boot server prepared for SSH actions.""" kwargs["auto_assign_nic"] = True server = self._boot_server(image, flavor, **kwargs) if not server.networks: raise RuntimeError( "Server `%s' is not connected to any network. 
" "Use network context for auto-assigning networks " "or provide `nics' argument with specific net-id." % server.name) if use_floating_ip: fip = self._attach_floating_ip(server, floating_network) else: internal_network = list(server.networks)[0] fip = {"ip": server.addresses[internal_network][0]["addr"]} return server, {"ip": fip.get("ip"), "id": fip.get("id"), "is_floating": use_floating_ip} def _attach_floating_ip(self, server, floating_network): internal_network = list(server.networks)[0] fixed_ip = server.addresses[internal_network][0]["addr"] floatingip = self.neutron.create_floatingip( floating_network=floating_network) self._associate_floating_ip(server, floatingip, fixed_address=fixed_ip) return {"id": floatingip["id"], "ip": floatingip["floating_ip_address"]} def _delete_floating_ip(self, server, fip): with logging.ExceptionLogger( LOG, "Unable to delete IP: %s" % fip["ip"]): if self.check_ip_address(fip["ip"])(server): self._dissociate_floating_ip(server, fip) self.neutron.delete_floatingip(fip["id"]) def _delete_server_with_fip(self, server, fip, force_delete=False): if fip["is_floating"]: self._delete_floating_ip(server, fip) return self._delete_server(server, force=force_delete) @atomic.action_timer("vm.wait_for_ssh") def _wait_for_ssh(self, ssh, timeout=120, interval=1): ssh.wait(timeout, interval) @atomic.action_timer("vm.wait_for_ping") def _wait_for_ping(self, server_ip): server = Host(server_ip) utils.wait_for_status( server, ready_statuses=[Host.ICMP_UP_STATUS], update_resource=Host.update_status, timeout=CONF.openstack.vm_ping_timeout, check_interval=CONF.openstack.vm_ping_poll_interval ) def _run_command(self, server_ip, port, username, password, command, pkey=None, timeout=120, interval=1): """Run command via SSH on server. Create SSH connection for server, wait for server to become available (there is a delay between server being set to ACTIVE and sshd being available). Then call run_command_over_ssh to actually execute the command. 
:param server_ip: server ip address :param port: ssh port for SSH connection :param username: str. ssh username for server :param password: Password for SSH authentication :param command: Dictionary specifying command to execute. See `rally info find VMTasks.boot_runcommand_delete' parameter `command' docstring for explanation. :param pkey: key for SSH authentication :param timeout: wait for ssh timeout. Default is 120 seconds :param interval: ssh retry interval. Default is 1 second :returns: tuple (exit_status, stdout, stderr) """ pkey = pkey if pkey else self.context["user"]["keypair"]["private"] ssh = sshutils.SSH(username, server_ip, port=port, pkey=pkey, password=password) try: self._wait_for_ssh(ssh, timeout, interval) return self._run_command_over_ssh(ssh, command) finally: try: ssh.close() except AttributeError: pass
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,801
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/murano/test_murano_packages.py
# Copyright 2015: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.task.contexts.murano import murano_packages from tests.unit import test CTX = "rally_openstack.task.contexts.murano.murano_packages" class MuranoPackageGeneratorTestCase(test.TestCase): def setUp(self): super(MuranoPackageGeneratorTestCase, self).setUp() @staticmethod def _get_context(): return { "config": { "users": { "tenants": 2, "users_per_tenant": 1, "concurrent": 1, }, "murano_packages": { "app_package": ( "rally-jobs/extra/murano/" "applications/HelloReporter/" "io.murano.apps.HelloReporter.zip") } }, "admin": { "credential": mock.MagicMock() }, "task": mock.MagicMock(), "owner_id": "foo_uuid", "users": [ { "id": "user_0", "tenant_id": "tenant_0", "credential": "credential" }, { "id": "user_1", "tenant_id": "tenant_1", "credential": "credential" } ], "tenants": { "tenant_0": {"name": "tenant_0_name"}, "tenant_1": {"name": "tenant_1_name"} } } @mock.patch("%s.osclients" % CTX) def test_setup(self, mock_osclients): mock_app = mock.MagicMock(id="fake_app_id") (mock_osclients.Clients().murano(). 
packages.create.return_value) = mock_app murano_ctx = murano_packages.PackageGenerator(self._get_context()) murano_ctx.setup() self.assertEqual(2, len(murano_ctx.context["tenants"])) tenant_id = murano_ctx.context["users"][0]["tenant_id"] self.assertEqual([mock_app], murano_ctx.context["tenants"][tenant_id]["packages"]) @mock.patch("%s.osclients" % CTX) @mock.patch("%s.resource_manager.cleanup" % CTX) def test_cleanup_with_zip(self, mock_cleanup, mock_osclients): mock_app = mock.Mock(id="fake_app_id") (mock_osclients.Clients().murano(). packages.create.return_value) = mock_app murano_ctx = murano_packages.PackageGenerator(self._get_context()) murano_ctx.setup() murano_ctx.cleanup() mock_cleanup.assert_called_once_with( names=["murano.packages"], users=murano_ctx.context["users"], superclass=murano_packages.PackageGenerator, task_id="foo_uuid") @mock.patch("%s.osclients" % CTX) @mock.patch("%s.resource_manager.cleanup" % CTX) def test_cleanup_with_dir(self, mock_cleanup, mock_osclients): mock_app = mock.Mock(id="fake_app_id") (mock_osclients.Clients().murano(). packages.create.return_value) = mock_app ctx_dict = self._get_context() app_dir = ("rally-jobs/extra/murano/applications/" "HelloReporter/io.murano.apps.HelloReporter/") ctx_dict["config"]["murano_packages"]["app_package"] = app_dir murano_ctx = murano_packages.PackageGenerator(ctx_dict) murano_ctx.setup() murano_ctx.cleanup() mock_cleanup.assert_called_once_with( names=["murano.packages"], users=murano_ctx.context["users"], superclass=murano_packages.PackageGenerator, task_id="foo_uuid")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,802
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/gnocchi/metric.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.task import atomic from rally.task import service class GnocchiService(service.Service): @atomic.action_timer("gnocchi.create_archive_policy") def create_archive_policy(self, name, definition=None, aggregation_methods=None): """Create an archive policy. :param name: Archive policy name :param definition: Archive policy definition :param aggregation_methods: Aggregation method of the archive policy """ archive_policy = {"name": name} if definition is not None: archive_policy["definition"] = definition if aggregation_methods is not None: archive_policy["aggregation_methods"] = aggregation_methods return self._clients.gnocchi().archive_policy.create( archive_policy) @atomic.action_timer("gnocchi.delete_archive_policy") def delete_archive_policy(self, name): """Delete an archive policy. :param name: Archive policy name """ return self._clients.gnocchi().archive_policy.delete(name) @atomic.action_timer("gnocchi.list_archive_policy") def list_archive_policy(self): """List archive policies.""" return self._clients.gnocchi().archive_policy.list() @atomic.action_timer("gnocchi.create_archive_policy_rule") def create_archive_policy_rule(self, name, metric_pattern=None, archive_policy_name=None): """Create an archive policy rule. 
        :param name: Archive policy rule name
        :param metric_pattern: Wildcard of metric name to match
        :param archive_policy_name: Archive policy name
        """
        archive_policy_rule = {"name": name}
        archive_policy_rule["metric_pattern"] = metric_pattern
        archive_policy_rule["archive_policy_name"] = archive_policy_name
        return self._clients.gnocchi().archive_policy_rule.create(
            archive_policy_rule)

    @atomic.action_timer("gnocchi.delete_archive_policy_rule")
    def delete_archive_policy_rule(self, name):
        """Delete an archive policy rule.

        :param name: Archive policy rule name
        """
        return self._clients.gnocchi().archive_policy_rule.delete(name)

    @atomic.action_timer("gnocchi.list_archive_policy_rule")
    def list_archive_policy_rule(self):
        """List archive policy rules."""
        return self._clients.gnocchi().archive_policy_rule.list()

    @atomic.action_timer("gnocchi.list_capabilities")
    def list_capabilities(self):
        """List capabilities."""
        return self._clients.gnocchi().capabilities.list()

    @atomic.action_timer("gnocchi.get_measures_aggregation")
    def get_measures_aggregation(self, metrics, aggregation=None,
                                 refresh=None):
        """Get measurements of aggregated metrics.

        :param metrics: Metric IDs or name
        :param aggregation: Granularity aggregation function to retrieve
        :param refresh: Force aggregation of all known measures
        """
        return self._clients.gnocchi().metric.aggregation(
            metrics=metrics, aggregation=aggregation, refresh=refresh)

    @atomic.action_timer("gnocchi.get_measures")
    def get_measures(self, metric, aggregation=None, refresh=None):
        """Get measurements of a metric.

        :param metric: Metric ID or name
        :param aggregation: Aggregation to retrieve
        :param refresh: Force aggregation of all known measures
        """
        return self._clients.gnocchi().metric.get_measures(
            metric=metric, aggregation=aggregation, refresh=refresh)

    @atomic.action_timer("gnocchi.create_metric")
    def create_metric(self, name, archive_policy_name=None, resource_id=None,
                      unit=None):
        """Create a metric.

        :param name: Metric name
        :param archive_policy_name: Archive policy name
        :param resource_id: The resource ID to attach the metric to
        :param unit: The unit of the metric
        """
        return self._clients.gnocchi().metric.create(
            name=name, archive_policy_name=archive_policy_name,
            resource_id=resource_id, unit=unit)

    @atomic.action_timer("gnocchi.delete_metric")
    def delete_metric(self, metric_id):
        """Delete a metric.

        :param metric_id: metric ID
        """
        return self._clients.gnocchi().metric.delete(metric_id)

    @atomic.action_timer("gnocchi.list_metric")
    def list_metric(self, limit=None):
        """List metrics, following marker-based pagination.

        :param limit: maximum total number of metrics to return;
            None means fetch every page until the server returns an
            empty one.
        """
        metrics = []
        # Marker-based pagination: each request resumes after the id of
        # the last metric seen on the previous page.
        marker = None
        limit_val = limit
        while True:
            page = self._clients.gnocchi().metric.list(limit=limit_val,
                                                       marker=marker)
            if not page:
                break
            metrics.extend(page)
            marker = page[-1]["id"]
            if limit_val is not None:
                cnt = len(metrics)
                if cnt < limit:
                    # Ask only for the remainder on the next request.
                    limit_val = limit - cnt
                else:
                    # Requested number of metrics collected; stop early.
                    break
        return metrics

    @atomic.action_timer("gnocchi.create_resource")
    def create_resource(self, name, resource_type="generic"):
        """Create a resource.

        :param name: Name of the resource
        :param resource_type: Type of the resource
        """
        # NOTE(review): the given name is used as the resource *id*;
        # gnocchi expects a UUID or a string it can map to one.
        resource = {"id": name}
        return self._clients.gnocchi().resource.create(
            resource_type, resource)

    @atomic.action_timer("gnocchi.delete_resource")
    def delete_resource(self, resource_id):
        """Delete a resource.

        :param resource_id: ID of the resource
        """
        return self._clients.gnocchi().resource.delete(resource_id)

    @atomic.action_timer("gnocchi.list_resource")
    def list_resource(self, resource_type="generic"):
        """List resources.

        :param resource_type: Type of resources to list
        """
        return self._clients.gnocchi().resource.list(
            resource_type=resource_type)

    @atomic.action_timer("gnocchi.create_resource_type")
    def create_resource_type(self, name, attributes=None):
        """Create a resource type.

        :param name: Name of the resource type
        :param attributes: optional mapping of attribute definitions for
            the new resource type
        """
        resource_type = {"name": name}
        if attributes is not None:
            resource_type["attributes"] = attributes
        return self._clients.gnocchi().resource_type.create(
            resource_type)

    @atomic.action_timer("gnocchi.delete_resource_type")
    def delete_resource_type(self, name):
        """Delete a resource type.

        :param name: Name of the resource type
        """
        return self._clients.gnocchi().resource_type.delete(name)

    @atomic.action_timer("gnocchi.list_resource_type")
    def list_resource_type(self):
        """List resource types."""
        return self._clients.gnocchi().resource_type.list()

    @atomic.action_timer("gnocchi.get_status")
    def get_status(self, detailed=False):
        """Get the status of measurements processing.

        :param detailed: Get detailed status.
        """
        return self._clients.gnocchi().status.get(detailed)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,803
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/manila/shares.py
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally import exceptions
from rally.task import types
from rally.task import utils as rally_utils
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task.contexts.manila import consts as manila_consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.manila import utils
from rally_openstack.task.scenarios.vm import utils as vm_utils


"""Scenarios for Manila shares."""


LOG = logging.getLogger(__name__)


@validation.add("enum", param_name="share_proto",
                values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"],
                case_insensitive=True, missed=False)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_and_delete_share",
                    platform="openstack")
class CreateAndDeleteShare(utils.ManilaScenario):

    def run(self, share_proto, size=1, min_sleep=0, max_sleep=0, **kwargs):
        """Create and delete a share.

        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between share creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        :param share_proto: share protocol, valid values are NFS, CIFS,
            GlusterFS and HDFS
        :param size: share size in GB, should be greater than 0
        :param min_sleep: minimum sleep time in seconds (non-negative)
        :param max_sleep: maximum sleep time in seconds (non-negative)
        :param kwargs: optional args to create a share
        """
        share = self._create_share(
            share_proto=share_proto,
            size=size,
            **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_share(share)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", fail_on_404_image=False)
@validation.add("number", param_name="port", minval=1, maxval=65535,
                nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.MANILA,
                                               consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila", "nova"],
                             "keypair@openstack": {},
                             "allow_ssh@openstack": None},
                    name="ManilaShares.create_share_and_access_from_vm",
                    platform="openstack")
class CreateShareAndAccessFromVM(utils.ManilaScenario, vm_utils.VMScenario):

    def run(self, image, flavor, username, size=1, password=None,
            floating_network=None, port=22,
            use_floating_ip=True, force_delete=False, max_log_length=None,
            **kwargs):
        """Create a share and access it from a VM.

        - create NFS share
        - launch VM
        - authorize VM's fip to access the share
        - mount share inside the VM
        - write to share
        - delete VM
        - delete share

        :param size: share size in GB, should be greater than 0
        :param image: glance image name to use for the vm
        :param flavor: VM flavor name
        :param username: ssh username on server
        :param password: Password on SSH authentication
        :param floating_network: external network name, for floating ip
        :param port: ssh port for SSH connection
        :param use_floating_ip: bool, floating or fixed IP for SSH connection
        :param force_delete: whether to use force_delete for servers
        :param max_log_length: The number of tail nova console-log lines user
                               would like to retrieve
        :param kwargs: optional args to create a share or a VM
        """
        share_proto = "nfs"
        share = self._create_share(
            share_proto=share_proto,
            size=size,
            **kwargs)
        location = self._export_location(share)

        # NOTE(review): the same **kwargs are forwarded both to
        # _create_share above and to _boot_server_with_fip below; verify
        # callers only pass keys that are valid for both APIs.
        server, fip = self._boot_server_with_fip(
            image, flavor, use_floating_ip=use_floating_ip,
            floating_network=floating_network,
            key_name=self.context["user"]["keypair"]["name"],
            userdata="#cloud-config\npackages:\n - nfs-common",
            **kwargs)

        # Allow read/write NFS access from the VM's floating IP.
        self._allow_access_share(share, "ip", fip["ip"], "rw")
        mount_opt = "-t nfs -o nfsvers=4.1,proto=tcp"
        # Wait for cloud-init (installs nfs-common), mount the export and
        # prove writability by touching a file.
        script = f"sudo cloud-init status -w;" \
                 f"sudo mount {mount_opt} {location[0]} /mnt || exit 1;" \
                 f"sudo touch /mnt/testfile || exit 2"

        command = {
            "script_inline": script,
            "interpreter": "/bin/bash"
        }
        try:
            rally_utils.wait_for_status(
                server,
                ready_statuses=["ACTIVE"],
                update_resource=rally_utils.get_from_manager(),
            )

            code, out, err = self._run_command(
                fip["ip"], port, username, password, command=command)
            if code:
                raise exceptions.ScriptError(
                    "Error running command %(command)s. "
                    "Error %(code)s: %(error)s" % {
                        "command": command, "code": code, "error": err})
        except (exceptions.TimeoutException,
                exceptions.SSHTimeout):
            # Dump console output to help diagnose SSH/boot timeouts.
            console_logs = self._get_server_console_output(server,
                                                           max_log_length)
            LOG.debug("VM console logs:\n%s" % console_logs)
            raise
        finally:
            # Always clean up the VM and the share, even on failure.
            self._delete_server_with_fip(server, fip,
                                         force_delete=force_delete)
            self._delete_share(share)

        self.add_output(complete={
            "title": "Script StdOut",
            "chart_plugin": "TextArea",
            "data": str(out).split("\n")
        })
        if err:
            self.add_output(complete={
                "title": "Script StdErr",
                "chart_plugin": "TextArea",
                "data": err.split("\n")
            })


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="ManilaShares.list_shares", platform="openstack")
class ListShares(utils.ManilaScenario):

    def run(self, detailed=True, search_opts=None):
        """Basic scenario for 'share list' operation.

        :param detailed: defines either to return detailed list of
            objects or not.
        :param search_opts: container of search opts such as
            "name", "host", "share_type", etc.
        """
        self._list_shares(detailed=detailed, search_opts=search_opts)


@validation.add("enum", param_name="share_proto",
                values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"],
                case_insensitive=True)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_and_extend_share",
                    platform="openstack")
class CreateAndExtendShare(utils.ManilaScenario):
    def run(self, share_proto, size=1, new_size=2, snapshot_id=None,
            description=None, metadata=None, share_network=None,
            share_type=None, is_public=False, availability_zone=None,
            share_group_id=None):
        """Create and extend a share

        :param share_proto: share protocol for new share
            available values are NFS, CIFS, CephFS, GlusterFS and HDFS.
        :param size: size in GiB
        :param new_size: new size of the share in GiB
        :param snapshot_id: ID of the snapshot
        :param description: description of a share
        :param metadata: optional metadata to set on share creation
        :param share_network: either instance of ShareNetwork or text with ID
        :param share_type: either instance of ShareType or text with ID
        :param is_public: whether to set share as public or not.
        :param availability_zone: availability zone of the share
        :param share_group_id: ID of the share group to which the share
            should belong
        """
        share = self._create_share(
            share_proto=share_proto,
            size=size,
            snapshot_id=snapshot_id,
            description=description,
            metadata=metadata,
            share_network=share_network,
            share_type=share_type,
            is_public=is_public,
            availability_zone=availability_zone,
            share_group_id=share_group_id
        )
        self._extend_share(share, new_size)


@validation.add("enum", param_name="share_proto",
                values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"],
                case_insensitive=True)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_and_shrink_share",
                    platform="openstack")
class CreateAndShrinkShare(utils.ManilaScenario):
    def run(self, share_proto, size=2, new_size=1, snapshot_id=None,
            description=None, metadata=None, share_network=None,
            share_type=None, is_public=False, availability_zone=None,
            share_group_id=None):
        """Create and shrink a share

        :param share_proto: share protocol for new share
            available values are NFS, CIFS, CephFS, GlusterFS and HDFS.
        :param size: size in GiB
        :param new_size: new size of the share in GiB
        :param snapshot_id: ID of the snapshot
        :param description: description of a share
        :param metadata: optional metadata to set on share creation
        :param share_network: either instance of ShareNetwork or text with ID
        :param share_type: either instance of ShareType or text with ID
        :param is_public: whether to set share as public or not.
        :param availability_zone: availability zone of the share
        :param share_group_id: ID of the share group to which the share
            should belong
        """
        share = self._create_share(
            share_proto=share_proto,
            size=size,
            snapshot_id=snapshot_id,
            description=description,
            metadata=metadata,
            share_network=share_network,
            share_type=share_type,
            is_public=is_public,
            availability_zone=availability_zone,
            share_group_id=share_group_id
        )
        self._shrink_share(share, new_size)


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_share_network_and_delete",
                    platform="openstack")
class CreateShareNetworkAndDelete(utils.ManilaScenario):

    # TODO(review): the deprecation message below looks copy-pasted from
    # another scenario ("create_and_delete_service"); it should name this
    # scenario. Changing the message is a behavior change, so only noting
    # it here.
    @logging.log_deprecated_args(
        "The 'name' argument to create_and_delete_service will be ignored",
        "1.1.2", ["name"], once=True)
    def run(self, neutron_net_id=None, neutron_subnet_id=None,
            nova_net_id=None, name=None, description=None):
        """Creates share network and then deletes.

        :param neutron_net_id: ID of Neutron network
        :param neutron_subnet_id: ID of Neutron subnet
        :param nova_net_id: ID of Nova network
        :param description: share network description
        """
        share_network = self._create_share_network(
            neutron_net_id=neutron_net_id,
            neutron_subnet_id=neutron_subnet_id,
            nova_net_id=nova_net_id,
            description=description,
        )
        self._delete_share_network(share_network)


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_share_network_and_list",
                    platform="openstack")
class CreateShareNetworkAndList(utils.ManilaScenario):

    # TODO(review): deprecation message names "create_and_delete_service"
    # instead of this scenario — copy-paste artifact; see note above class
    # naming in the deprecation string.
    @logging.log_deprecated_args(
        "The 'name' argument to create_and_delete_service will be ignored",
        "1.1.2", ["name"], once=True)
    def run(self, neutron_net_id=None, neutron_subnet_id=None,
            nova_net_id=None, name=None, description=None,
            detailed=True, search_opts=None):
        """Creates share network and then lists it.

        :param neutron_net_id: ID of Neutron network
        :param neutron_subnet_id: ID of Neutron subnet
        :param nova_net_id: ID of Nova network
        :param description: share network description
        :param detailed: defines either to return detailed list of
            objects or not.
        :param search_opts: container of search opts such as
            "name", "nova_net_id", "neutron_net_id", etc.
        """
        self._create_share_network(
            neutron_net_id=neutron_net_id,
            neutron_subnet_id=neutron_subnet_id,
            nova_net_id=nova_net_id,
            description=description,
        )
        self._list_share_networks(
            detailed=detailed,
            search_opts=search_opts,
        )


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="ManilaShares.list_share_servers",
                    platform="openstack")
class ListShareServers(utils.ManilaScenario):

    def run(self, search_opts=None):
        """Lists share servers.

        Requires admin creds.
        :param search_opts: container of following search opts:
            "host", "status", "share_network" and "project_id".
        """
        self._list_share_servers(search_opts=search_opts)


@validation.add("enum", param_name="share_proto",
                values=["nfs", "cephfs", "cifs", "glusterfs", "hdfs"],
                missed=False, case_insensitive=True)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["manila"]},
    name="ManilaShares.create_share_then_allow_and_deny_access")
class CreateShareThenAllowAndDenyAccess(utils.ManilaScenario):
    def run(self, share_proto, access_type, access, access_level="rw",
            size=1, snapshot_id=None, description=None, metadata=None,
            share_network=None, share_type=None, is_public=False,
            availability_zone=None, share_group_id=None):
        """Create a share and allow and deny access to it

        :param share_proto: share protocol for new share
            available values are NFS, CIFS, CephFS, GlusterFS and HDFS.
        :param access_type: represents the access type (e.g: 'ip',
            'domain'...)
        :param access: represents the object (e.g: '127.0.0.1'...)
        :param access_level: access level to the share (e.g: 'rw', 'ro')
        :param size: size in GiB
        :param snapshot_id: ID of the snapshot
        :param description: description of a share
        :param metadata: optional metadata to set on share creation
        :param share_network: either instance of ShareNetwork or text with ID
        :param share_type: either instance of ShareType or text with ID
        :param is_public: whether to set share as public or not.
        :param availability_zone: availability zone of the share
        :param share_group_id: ID of the share group to which the share
            should belong
        """
        share = self._create_share(
            share_proto=share_proto,
            size=size,
            snapshot_id=snapshot_id,
            description=description,
            metadata=metadata,
            share_network=share_network,
            share_type=share_type,
            is_public=is_public,
            availability_zone=availability_zone,
            share_group_id=share_group_id
        )
        # Grant access, then immediately revoke it by the returned rule id.
        access_result = self._allow_access_share(share, access_type, access,
                                                 access_level)
        self._deny_access_share(share, access_result["id"])


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_security_service_and_delete",
                    platform="openstack")
class CreateSecurityServiceAndDelete(utils.ManilaScenario):

    # TODO(review): deprecation message names "create_and_delete_service"
    # instead of this scenario — copy-paste artifact in the string.
    @logging.log_deprecated_args(
        "The 'name' argument to create_and_delete_service will be ignored",
        "1.1.2", ["name"], once=True)
    def run(self, security_service_type, dns_ip=None, server=None,
            domain=None, user=None, password=None,
            name=None, description=None):
        """Creates security service and then deletes.

        :param security_service_type: security service type, permitted
            values are 'ldap', 'kerberos' or 'active_directory'.
        :param dns_ip: dns ip address used inside tenant's network
        :param server: security service server ip address or hostname
        :param domain: security service domain
        :param user: security identifier used by tenant
        :param password: password used by user
        :param description: security service description
        """
        security_service = self._create_security_service(
            security_service_type=security_service_type,
            dns_ip=dns_ip,
            server=server,
            domain=domain,
            user=user,
            password=password,
            description=description,
        )
        self._delete_security_service(security_service)


@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["manila"]},
    name="ManilaShares.attach_security_service_to_share_network",
    platform="openstack")
class AttachSecurityServiceToShareNetwork(utils.ManilaScenario):

    def run(self, security_service_type="ldap"):
        """Attaches security service to share network.

        :param security_service_type: type of security service to use.
            Should be one of following: 'ldap', 'kerberos' or
            'active_directory'.
        """
        sn = self._create_share_network()
        ss = self._create_security_service(
            security_service_type=security_service_type)
        self._add_security_service_to_share_network(sn, ss)


@validation.add("enum", param_name="share_proto",
                values=["NFS", "CIFS", "GLUSTERFS", "HDFS", "CEPHFS"],
                case_insensitive=True)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.create_and_list_share",
                    platform="openstack")
class CreateAndListShare(utils.ManilaScenario):

    def run(self, share_proto, size=1, min_sleep=0, max_sleep=0,
            detailed=True, **kwargs):
        """Create a share and list all shares.

        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between share creation and list
        (of random duration from [min_sleep, max_sleep]).

        :param share_proto: share protocol, valid values are NFS, CIFS,
            GlusterFS and HDFS
        :param size: share size in GB, should be greater than 0
        :param min_sleep: minimum sleep time in seconds (non-negative)
        :param max_sleep: maximum sleep time in seconds (non-negative)
        :param detailed: defines whether to get detailed list of shares
            or not
        :param kwargs: optional args to create a share
        """
        self._create_share(share_proto=share_proto, size=size, **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._list_shares(detailed=detailed)


@validation.add("number", param_name="sets", minval=1, integer_only=True)
@validation.add("number", param_name="set_size", minval=1, integer_only=True)
@validation.add("number", param_name="key_min_length", minval=1, maxval=256,
                integer_only=True)
@validation.add("number", param_name="key_max_length", minval=1, maxval=256,
                integer_only=True)
@validation.add("number", param_name="value_min_length", minval=1,
                maxval=1024, integer_only=True)
@validation.add("number", param_name="value_max_length", minval=1,
                maxval=1024, integer_only=True)
@validation.add("required_services", services=[consts.Service.MANILA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts",
                contexts=manila_consts.SHARES_CONTEXT_NAME)
@scenario.configure(context={"cleanup@openstack": ["manila"]},
                    name="ManilaShares.set_and_delete_metadata",
                    platform="openstack")
class SetAndDeleteMetadata(utils.ManilaScenario):

    def run(self, sets=10, set_size=3, delete_size=3,
            key_min_length=1, key_max_length=256,
            value_min_length=1, value_max_length=1024):
        """Sets and deletes share metadata.

        This requires a share to be created with the shares
        context. Additionally, ``sets * set_size`` must be greater
        than or equal to ``deletes * delete_size``.

        :param sets: how many set_metadata operations to perform
        :param set_size: number of metadata keys to set in each
            set_metadata operation
        :param delete_size: number of metadata keys to delete in each
            delete_metadata operation
        :param key_min_length: minimal size of metadata key to set
        :param key_max_length: maximum size of metadata key to set
        :param value_min_length: minimal size of metadata value to set
        :param value_max_length: maximum size of metadata value to set
        """
        # NOTE(review): there is no ``deletes`` parameter; the docstring
        # constraint above presumably means "sets * set_size >=
        # delete_size" — confirm against _set_metadata/_delete_metadata.
        # Round-robin over the context-provided shares across iterations.
        shares = self.context.get("tenant", {}).get("shares", [])
        share = shares[self.context["iteration"] % len(shares)]

        keys = self._set_metadata(
            share=share,
            sets=sets,
            set_size=set_size,
            key_min_length=key_min_length,
            key_max_length=key_max_length,
            value_min_length=value_min_length,
            value_max_length=value_max_length)

        self._delete_metadata(share=share, keys=keys, delete_size=delete_size)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,804
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/quotas/manila_quotas.py
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class ManilaQuotas(object):
    """Management of Manila quotas."""

    # JSON schema describing the quota keys a user may configure; -1 means
    # "unlimited" by OpenStack quota convention.
    QUOTAS_SCHEMA = {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            "shares": {
                "type": "integer",
                "minimum": -1
            },
            "gigabytes": {
                "type": "integer",
                "minimum": -1
            },
            "snapshots": {
                "type": "integer",
                "minimum": -1
            },
            "snapshot_gigabytes": {
                "type": "integer",
                "minimum": -1
            },
            "share_networks": {
                "type": "integer",
                "minimum": -1
            }
        }
    }

    def __init__(self, clients):
        """:param clients: an OpenStack clients holder exposing ``manila()``"""
        self.clients = clients

    def update(self, tenant_id, **kwargs):
        """Update quota values for a tenant.

        :param tenant_id: ID of the tenant whose quotas are updated
        :param kwargs: quota names/values as described by QUOTAS_SCHEMA
        """
        self.clients.manila().quotas.update(tenant_id, **kwargs)

    def delete(self, tenant_id):
        """Reset a tenant's quotas back to the defaults.

        :param tenant_id: ID of the tenant whose quotas are reset
        """
        self.clients.manila().quotas.delete(tenant_id)

    def get(self, tenant_id):
        """Return the tenant's current quotas as a plain dict.

        Only the keys declared in QUOTAS_SCHEMA are extracted from the
        client's QuotaSet response object.

        :param tenant_id: ID of the tenant whose quotas are fetched
        :returns: dict mapping quota name -> current value
        """
        response = self.clients.manila().quotas.get(tenant_id)
        # Dict comprehension instead of dict([(k, v) ...]) — same result,
        # idiomatic form (flake8-comprehensions C404).
        return {k: getattr(response, k)
                for k in self.QUOTAS_SCHEMA["properties"]}
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,805
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/manila/test_manila_share_networks.py
# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import ddt from rally import exceptions from rally_openstack.task.contexts.manila import consts from rally_openstack.task.contexts.manila import manila_share_networks from tests.unit import test MANILA_UTILS_PATH = ( "rally_openstack.task.scenarios.manila.utils.ManilaScenario.") MOCK_USER_CREDENTIAL = mock.MagicMock() class Fake(object): def __init__(self, **kwargs): for k, v in kwargs.items(): setattr(self, k, v) def __getitem__(self, item): return getattr(self, item) def to_dict(self): return self.__dict__ @ddt.ddt class ShareNetworksTestCase(test.TestCase): TENANTS_AMOUNT = 3 USERS_PER_TENANT = 4 SECURITY_SERVICES = [ {"type": ss_type, "dns_ip": "fake_dns_ip_%s" % ss_type, "server": "fake_server_%s" % ss_type, "domain": "fake_domain_%s" % ss_type, "user": "fake_user_%s" % ss_type, "password": "fake_password_%s" % ss_type, "name": "fake_optional_name_%s" % ss_type} for ss_type in ("ldap", "kerberos", "active_directory") ] def _get_context(self, use_security_services=False, networks_per_tenant=2, neutron_network_provider=True): tenants = {} for t_id in range(self.TENANTS_AMOUNT): tenants[str(t_id)] = {"name": str(t_id)} tenants[str(t_id)]["networks"] = [] for i in range(networks_per_tenant): network = {"id": "fake_net_id_%s" % i} if neutron_network_provider: network["subnets"] = ["fake_subnet_id_of_net_%s" % i] else: network["cidr"] = 
"101.0.5.0/24" tenants[str(t_id)]["networks"].append(network) users = [] for t_id in tenants.keys(): for i in range(self.USERS_PER_TENANT): users.append({ "id": i, "tenant_id": t_id, "credential": MOCK_USER_CREDENTIAL}) context = { "config": { "users": { "tenants": self.TENANTS_AMOUNT, "users_per_tenant": self.USERS_PER_TENANT, "random_user_choice": False, }, consts.SHARE_NETWORKS_CONTEXT_NAME: { "use_share_networks": True, "share_networks": [], }, consts.SECURITY_SERVICES_CONTEXT_NAME: { "security_services": ( self.SECURITY_SERVICES if use_security_services else []) }, "network": { "networks_per_tenant": networks_per_tenant, "start_cidr": "101.0.5.0/24", }, }, "admin": { "credential": mock.MagicMock(), }, "task": mock.MagicMock(), "users": users, "tenants": tenants, "user_choice_method": "random", } return context def setUp(self): super(self.__class__, self).setUp() self.ctxt_use_existing = { "task": mock.MagicMock(), "config": { "existing_users": {"foo": "bar"}, consts.SHARE_NETWORKS_CONTEXT_NAME: { "use_share_networks": True, "share_networks": { "tenant_1_id": ["sn_1_id", "sn_2_name"], "tenant_2_name": ["sn_3_id", "sn_4_name", "sn_5_id"], }, }, }, "tenants": { "tenant_1_id": {"id": "tenant_1_id", "name": "tenant_1_name"}, "tenant_2_id": {"id": "tenant_2_id", "name": "tenant_2_name"}, }, "users": [ {"tenant_id": "tenant_1_id", "credential": mock.MagicMock()}, {"tenant_id": "tenant_2_id", "credential": mock.MagicMock()}, ], } self.existing_sns = [ Fake(id="sn_%s_id" % i, name="sn_%s_name" % i) for i in range(1, 6) ] def test_init(self): context = { "task": mock.MagicMock(), "config": { consts.SHARE_NETWORKS_CONTEXT_NAME: {"foo": "bar"}, "not_manila": {"not_manila_key": "not_manila_value"}, }, } inst = manila_share_networks.ShareNetworks(context) self.assertEqual( {"foo": "bar", "share_networks": {}, "use_share_networks": False}, inst.config) def test_setup_share_networks_disabled(self): ctxt = { "task": mock.MagicMock(), "config": { 
consts.SHARE_NETWORKS_CONTEXT_NAME: { "use_share_networks": False, }, }, consts.SHARE_NETWORKS_CONTEXT_NAME: {}, } inst = manila_share_networks.ShareNetworks(ctxt) expected_ctxt = copy.deepcopy(inst.context) inst.setup() self.assertEqual(expected_ctxt, inst.context) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") def test_setup_use_existing_share_networks( self, mock_manila_scenario__list_share_networks, mock_clients): existing_sns = self.existing_sns expected_ctxt = copy.deepcopy(self.ctxt_use_existing) inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing) mock_manila_scenario__list_share_networks.return_value = ( self.existing_sns) expected_ctxt.update({ "delete_share_networks": False, "tenants": { "tenant_1_id": { "id": "tenant_1_id", "name": "tenant_1_name", consts.SHARE_NETWORKS_CONTEXT_NAME: { "share_networks": [ sn.to_dict() for sn in existing_sns[0:2]], }, }, "tenant_2_id": { "id": "tenant_2_id", "name": "tenant_2_name", consts.SHARE_NETWORKS_CONTEXT_NAME: { "share_networks": [ sn.to_dict() for sn in existing_sns[2:5]], }, }, } }) inst.setup() self.assertEqual(expected_ctxt["task"], inst.context.get("task")) self.assertEqual(expected_ctxt["config"], inst.context.get("config")) self.assertEqual(expected_ctxt["users"], inst.context.get("users")) self.assertFalse( inst.context.get(consts.SHARE_NETWORKS_CONTEXT_NAME, {}).get( "delete_share_networks")) self.assertEqual(expected_ctxt["tenants"], inst.context.get("tenants")) def test_setup_use_existing_share_networks_tenant_not_found(self): ctxt = copy.deepcopy(self.ctxt_use_existing) ctxt.update({"tenants": {}}) inst = manila_share_networks.ShareNetworks(ctxt) self.assertRaises(exceptions.ContextSetupFailure, inst.setup) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") def test_setup_use_existing_share_networks_sn_not_found( self, mock_manila_scenario__list_share_networks, 
mock_clients): ctxt = copy.deepcopy(self.ctxt_use_existing) ctxt["config"][consts.SHARE_NETWORKS_CONTEXT_NAME][ "share_networks"] = {"tenant_1_id": ["foo"]} inst = manila_share_networks.ShareNetworks(ctxt) mock_manila_scenario__list_share_networks.return_value = ( self.existing_sns) self.assertRaises(exceptions.ContextSetupFailure, inst.setup) def test_setup_use_existing_share_networks_with_empty_list(self): ctxt = copy.deepcopy(self.ctxt_use_existing) ctxt["config"][consts.SHARE_NETWORKS_CONTEXT_NAME][ "share_networks"] = {} inst = manila_share_networks.ShareNetworks(ctxt) self.assertRaises(exceptions.ContextSetupFailure, inst.setup) @ddt.data(True, False) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_create_share_network") @mock.patch(MANILA_UTILS_PATH + "_add_security_service_to_share_network") def test_setup_autocreate_share_networks_with_security_services( self, neutron, mock_manila_scenario__add_security_service_to_share_network, mock_manila_scenario__create_share_network, mock_clients): networks_per_tenant = 2 ctxt = self._get_context( networks_per_tenant=networks_per_tenant, neutron_network_provider=neutron, use_security_services=True, ) inst = manila_share_networks.ShareNetworks(ctxt) for tenant_id in list(ctxt["tenants"].keys()): inst.context["tenants"][tenant_id][ consts.SECURITY_SERVICES_CONTEXT_NAME] = { "security_services": [ Fake(id="fake_id").to_dict() for i in (1, 2, 3) ] } inst.setup() self.assertEqual(ctxt["task"], inst.context.get("task")) self.assertEqual(ctxt["config"], inst.context.get("config")) self.assertEqual(ctxt["users"], inst.context.get("users")) self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) mock_add_security_service_to_share_network = ( mock_manila_scenario__add_security_service_to_share_network) mock_add_security_service_to_share_network.assert_has_calls([ mock.call(mock.ANY, mock.ANY) for _ in range( self.TENANTS_AMOUNT * networks_per_tenant * 
len(self.SECURITY_SERVICES))]) if neutron: sn_args = { "neutron_net_id": mock.ANY, "neutron_subnet_id": mock.ANY, } else: sn_args = {"nova_net_id": mock.ANY} expected_calls = [ mock.call(**sn_args), mock.call().to_dict(), mock.ANY, mock.ANY, mock.ANY, ] mock_manila_scenario__create_share_network.assert_has_calls( expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant)) mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL) for i in range(self.TENANTS_AMOUNT)]) @ddt.data(True, False) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_create_share_network") @mock.patch(MANILA_UTILS_PATH + "_add_security_service_to_share_network") def test_setup_autocreate_share_networks_wo_security_services( self, neutron, mock_manila_scenario__add_security_service_to_share_network, mock_manila_scenario__create_share_network, mock_clients): networks_per_tenant = 2 ctxt = self._get_context( networks_per_tenant=networks_per_tenant, neutron_network_provider=neutron, ) inst = manila_share_networks.ShareNetworks(ctxt) inst.setup() self.assertEqual(ctxt["task"], inst.context.get("task")) self.assertEqual(ctxt["config"], inst.context.get("config")) self.assertEqual(ctxt["users"], inst.context.get("users")) self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) self.assertFalse( mock_manila_scenario__add_security_service_to_share_network.called) if neutron: sn_args = { "neutron_net_id": mock.ANY, "neutron_subnet_id": mock.ANY, } else: sn_args = {"nova_net_id": mock.ANY} expected_calls = [mock.call(**sn_args), mock.call().to_dict()] mock_manila_scenario__create_share_network.assert_has_calls( expected_calls * (self.TENANTS_AMOUNT * networks_per_tenant)) mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL) for i in range(self.TENANTS_AMOUNT)]) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_create_share_network") @mock.patch(MANILA_UTILS_PATH + 
"_add_security_service_to_share_network") def test_setup_autocreate_share_networks_wo_networks( self, mock_manila_scenario__add_security_service_to_share_network, mock_manila_scenario__create_share_network, mock_clients): ctxt = self._get_context(networks_per_tenant=0) inst = manila_share_networks.ShareNetworks(ctxt) inst.setup() self.assertEqual(ctxt["task"], inst.context.get("task")) self.assertEqual(ctxt["config"], inst.context.get("config")) self.assertEqual(ctxt["users"], inst.context.get("users")) self.assertEqual(ctxt["tenants"], inst.context.get("tenants")) self.assertFalse( mock_manila_scenario__add_security_service_to_share_network.called) expected_calls = [mock.call(), mock.call().to_dict()] mock_manila_scenario__create_share_network.assert_has_calls( expected_calls * self.TENANTS_AMOUNT) mock_clients.assert_has_calls([mock.call(MOCK_USER_CREDENTIAL) for i in range(self.TENANTS_AMOUNT)]) @mock.patch("rally_openstack.common.osclients.Clients") @mock.patch(MANILA_UTILS_PATH + "_delete_share_network") @mock.patch(MANILA_UTILS_PATH + "_list_share_servers") @mock.patch(MANILA_UTILS_PATH + "_list_share_networks") def test_cleanup_used_existing_share_networks( self, mock_manila_scenario__list_share_networks, mock_manila_scenario__list_share_servers, mock_manila_scenario__delete_share_network, mock_clients): inst = manila_share_networks.ShareNetworks(self.ctxt_use_existing) mock_manila_scenario__list_share_networks.return_value = ( self.existing_sns) inst.setup() inst.cleanup() self.assertFalse(mock_manila_scenario__list_share_servers.called) self.assertFalse(mock_manila_scenario__delete_share_network.called) self.assertEqual(2, mock_clients.call_count) for user in self.ctxt_use_existing["users"]: self.assertIn(mock.call(user["credential"]), mock_clients.mock_calls) @mock.patch("rally_openstack.task.contexts.manila.manila_share_networks." 
"resource_manager.cleanup") def test_cleanup_autocreated_share_networks(self, mock_cleanup): task_id = "task" ctxt = { "config": {"manila_share_networks": { "use_share_networks": True}}, "users": [mock.Mock()], "task": {"uuid": task_id}} inst = manila_share_networks.ShareNetworks(ctxt) inst.cleanup() mock_cleanup.assert_called_once_with( names=["manila.share_networks"], users=ctxt["users"], superclass=manila_share_networks.ShareNetworks, task_id=task_id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,806
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/murano/murano_environments.py
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.murano import utils as murano_utils


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="murano_environments", platform="openstack",
                   order=402)
class EnvironmentGenerator(context.OpenStackContext):
    """Context class for creating murano environments.

    For every user/tenant pair produced by ``_iterate_per_tenants`` this
    context pre-creates ``environments_per_tenant`` murano environments
    and records them in ``context["tenants"][<tenant_id>]["environments"]``
    so scenarios can consume ready-made environments.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "environments_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
        },
        "required": ["environments_per_tenant"],
        "additionalProperties": False
    }

    def setup(self):
        """Create the configured number of murano environments per tenant.

        Side effect: populates the ``environments`` list of each tenant
        entry in ``self.context["tenants"]``.
        """
        for user, tenant_id in self._iterate_per_tenants():
            envs = self.context["tenants"][tenant_id]["environments"] = []
            # The scenario helper depends only on the per-tenant user, so
            # build it once per tenant rather than once per environment
            # (the original rebuilt it inside the inner loop).
            murano_util = murano_utils.MuranoScenario(
                {"user": user,
                 "task": self.context["task"],
                 "owner_id": self.context["owner_id"],
                 "config": self.context["config"]})
            for _ in range(self.config["environments_per_tenant"]):
                envs.append(murano_util._create_environment())

    def cleanup(self):
        """Delete every murano environment created by this context."""
        resource_manager.cleanup(names=["murano.environments"],
                                 users=self.context.get("users", []),
                                 superclass=murano_utils.MuranoScenario,
                                 task_id=self.get_owner_id())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,807
openstack/rally-openstack
refs/heads/master
/tests/unit/common/services/barbican/test_secrets.py
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.common.services.key_manager import barbican
from tests.unit import test


class BarbicanServiceTestCase(test.TestCase):
    """Unit tests for BarbicanService.

    The barbican client is a MagicMock, so each test only verifies the
    call the service wrapper forwards to the client and (where the
    service records one) the atomic-action timer it registers.
    """

    def setUp(self):
        super(BarbicanServiceTestCase, self).setUp()
        # Fully mocked client bundle; no real barbican endpoint is touched.
        self.clients = mock.MagicMock()
        self.name_generator = mock.MagicMock()
        self.service = barbican.BarbicanService(
            self.clients, name_generator=self.name_generator)

    def atomic_actions(self):
        # Shortcut for the atomic-action registry checked by the
        # _test_atomic_action_timer assertions below.
        return self.service._atomic_actions

    def test__list_secrets(self):
        self.assertEqual(
            self.service.list_secrets(),
            self.service._clients.barbican().secrets.list.return_value
        )
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "barbican.list_secrets")

    def test__create_secret(self):
        # NOTE(review): both sides of this assertEqual resolve to the same
        # MagicMock return_value regardless of arguments, so the kwargs
        # here are not actually verified — confirm against the service
        # implementation if the call signature matters.
        self.assertEqual(
            self.service.create_secret(),
            self.service._clients.barbican().secrets.create(
                name="fake_secret", payload="rally_data")
        )
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "barbican.create_secret")

    def test__get_secret(self):
        self.service.get_secret("fake_secret")
        self.service._clients.barbican().secrets.get \
            .assert_called_once_with("fake_secret")
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "barbican.get_secret")

    def test__delete_secret(self):
        self.service.delete_secret("fake_secret")
        self.service._clients.barbican().secrets.delete \
            .assert_called_once_with("fake_secret")
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "barbican.delete_secret")

    def test__list_containers(self):
        self.assertEqual(
            self.service.list_container(),
            self.service._clients.barbican().containers.list.return_value)
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.list_container")

    def test__container_delete(self):
        self.service.container_delete("fake_container")
        self.service._clients.barbican().containers.delete \
            .assert_called_once_with("fake_container")
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.container_delete")

    def test__container_create(self):
        # generate_random_name is stubbed so the expected name is stable.
        self.service.generate_random_name = mock.MagicMock(
            return_value="container")
        self.service.container_create()
        self.service._clients.barbican().containers.create \
            .assert_called_once_with(name="container", secrets=None)

    def test__create_rsa_container(self):
        self.service.generate_random_name = mock.MagicMock(
            return_value="container")
        self.service.create_rsa_container()
        self.service._clients.barbican().containers.create_rsa \
            .assert_called_once_with(
                name="container", private_key=None,
                private_key_passphrase=None, public_key=None)

    def test__create_generate_container(self):
        self.service.generate_random_name = mock.MagicMock(
            return_value="container")
        self.service.create_certificate_container()
        self.service._clients.barbican().containers \
            .create_certificate.assert_called_once_with(
                certificate=None, intermediates=None, name="container",
                private_key=None, private_key_passphrase=None)

    def test__list_orders(self):
        self.assertEqual(
            self.service.orders_list(),
            self.service._clients.barbican().orders.list.return_value)
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.orders_list")

    def test__orders_get(self):
        self.service.orders_get("fake_order")
        self.service._clients.barbican().orders.get \
            .assert_called_once_with("fake_order")

    def test__orders_delete(self):
        self.service.orders_delete("fake_order")
        self.service._clients.barbican().orders.delete \
            .assert_called_once_with("fake_order")
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.orders_delete")

    def test__create_key(self):
        self.service.generate_random_name = mock.MagicMock(
            return_value="key")
        self.service.create_key()
        self.service._clients.barbican().orders.create_key \
            .assert_called_once_with(
                name="key", algorithm="aes", bit_length=256, mode=None,
                payload_content_type=None, expiration=None)
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.create_key")

    def test__create_asymmetric(self):
        self.service.generate_random_name = mock.MagicMock(
            return_value="key")
        self.service.create_asymmetric()
        self.service._clients.barbican().orders.create_asymmetric \
            .assert_called_once_with(
                algorithm="aes", bit_length=256, expiration=None,
                name="key", pass_phrase=None, payload_content_type=None)
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.create_asymmetric")

    def test_create_certificate(self):
        self.service.generate_random_name = mock.MagicMock(
            return_value="key")
        self.service.create_certificate()
        self.service._clients.barbican().orders.create_certificate \
            .assert_called_once_with(
                name="key", request_type=None, subject_dn=None,
                source_container_ref=None, ca_id=None, profile=None,
                request_data=None)
        self._test_atomic_action_timer(
            self.atomic_actions(), "barbican.create_certificate")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,808
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/network/test_routers.py
# Copyright 2017: Orange
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
from unittest import mock

from rally_openstack.task.contexts.network import routers as router_context
from rally_openstack.task.scenarios.neutron import utils as neutron_utils
from tests.unit import test

SCN = "rally_openstack.task.scenarios"
CTX = "rally_openstack.task.contexts.network.routers"


class RouterTestCase(test.ScenarioTestCase):
    """Unit tests for the ``router`` task context."""

    def _gen_tenants(self, count):
        # One tenant per index, keyed and named by its stringified index.
        return {str(idx): {"name": str(idx)} for idx in range(count)}

    def test__init__default(self):
        self.context.update({
            "config": {"router": {"routers_per_tenant": 1}}
        })
        ctx = router_context.Router(self.context)
        self.assertEqual(ctx.config["routers_per_tenant"], 1)

    @mock.patch("%s.neutron.utils.NeutronScenario._create_router" % SCN,
                return_value={"id": "uuid"})
    def test_setup(self, mock_neutron_scenario__create_router):
        routers_per_tenant = 2
        tenants = self._gen_tenants(2)
        # Three users per tenant; user ids restart at 0 for each tenant.
        users = [
            {"id": user_idx, "tenant_id": tenant_id,
             "credential": mock.MagicMock()}
            for tenant_id in tenants
            for user_idx in range(3)
        ]
        self.context.update({
            "config": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 3,
                    "concurrent": 2,
                },
                "router": {"routers_per_tenant": routers_per_tenant},
            },
            "admin": {"credential": mock.MagicMock()},
            "users": users,
            "tenants": tenants,
        })

        # Expected result: the same context plus the stubbed router dicts
        # attached to every tenant.
        expected = copy.deepcopy(self.context)
        for tenant_id in tenants:
            tenant_routers = expected["tenants"][tenant_id].setdefault(
                "routers", [])
            for _ in range(routers_per_tenant):
                tenant_routers.append({"id": "uuid"})

        router_context.Router(self.context).setup()

        self.assertEqual(expected, self.context)

    @mock.patch("%s.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        self.context.update({"users": mock.MagicMock()})
        router_context.Router(self.context).cleanup()
        mock_cleanup.assert_called_once_with(
            names=["neutron.router"],
            users=self.context["users"],
            superclass=neutron_utils.NeutronScenario,
            task_id=self.context["owner_id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,809
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/manila/manila_security_services.py
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import cfg
from rally.common import validation

from rally_openstack.common import consts as rally_consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.contexts.manila import consts
from rally_openstack.task.scenarios.manila import utils as manila_utils

CONF = cfg.CONF

# Context name shared with the manila share-networks context so the two
# can find each other's data inside the per-tenant context dict.
CONTEXT_NAME = consts.SECURITY_SERVICES_CONTEXT_NAME


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name=CONTEXT_NAME, platform="openstack", order=445)
class SecurityServices(context.OpenStackContext):
    """This context creates 'security services' for Manila project.

    For each tenant it creates one security service per entry of the
    ``security_services`` config list and stores the created objects
    under ``context["tenants"][<id>][CONTEXT_NAME]["security_services"]``.
    An empty config list (the default) only initializes the empty
    per-tenant structure.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": rally_consts.JSON_SCHEMA,
        "properties": {
            "security_services": {
                "type": "array",
                "description":
                    "It is expected to be list of dicts with data for creation"
                    " of security services.",
                "items": {
                    "type": "object",
                    "properties": {"type": {"enum": ["active_directory",
                                                     "kerberos", "ldap"]}},
                    "required": ["type"],
                    "additionalProperties": True,
                    "description":
                        "Data for creation of security services. \n "
                        "Example:\n\n"
                        " .. code-block:: json\n\n"
                        " {'type': 'LDAP', 'dns_ip': 'foo_ip', \n"
                        " 'server': 'bar_ip', 'domain': 'quuz_domain',\n"
                        " 'user': 'ololo', 'password': 'fake_password'}\n"
                }
            },
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "security_services": [],
    }

    def setup(self):
        """Create the configured security services in every tenant.

        Side effect: adds a CONTEXT_NAME entry to each tenant dict.
        """
        for user, tenant_id in (self._iterate_per_tenants(
                self.context.get("users", []))):
            # Always initialize the structure, even when nothing will be
            # created, so consumers can rely on the key being present.
            self.context["tenants"][tenant_id][CONTEXT_NAME] = {
                "security_services": [],
            }
            if self.config["security_services"]:
                # One scenario helper per tenant; it runs with that
                # tenant's user credential.
                manila_scenario = manila_utils.ManilaScenario({
                    "task": self.task,
                    "owner_id": self.context["owner_id"],
                    "user": user
                })
                for ss in self.config["security_services"]:
                    inst = manila_scenario._create_security_service(
                        **ss).to_dict()
                    self.context["tenants"][tenant_id][CONTEXT_NAME][
                        "security_services"].append(inst)

    def cleanup(self):
        """Delete all security services created by this task."""
        resource_manager.cleanup(
            names=["manila.security_services"],
            users=self.context.get("users", []),
            superclass=manila_utils.ManilaScenario,
            task_id=self.get_owner_id())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,810
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/glance/test_images.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally import exceptions

from rally_openstack.task.scenarios.glance import images
from tests.unit import fakes
from tests.unit import test

BASE = "rally_openstack.task.scenarios.glance.images"
GLANCE_V2_PATH = ("rally_openstack.common.services.image.glance_v2."
                  "GlanceV2Service")


class GlanceBasicTestCase(test.ScenarioTestCase):
    """Unit tests for the basic glance image scenarios.

    The image service layer is patched out in setUp, so each test checks
    only the calls the scenario makes against the (mocked) service and
    the assertion errors raised for unexpected service results.
    """

    def get_test_context(self):
        context = super(GlanceBasicTestCase, self).get_test_context()
        context.update({
            "admin": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "user": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant_id",
                       "name": "fake_tenant_name"}
        })
        return context

    def setUp(self):
        super(GlanceBasicTestCase, self).setUp()
        # Patch the unified image service facade used by the scenarios.
        patch = mock.patch(
            "rally_openstack.common.services.image.image.Image")
        self.addCleanup(patch.stop)
        self.mock_image = patch.start()

    def test_create_and_list_image(self):
        image_service = self.mock_image.return_value
        fake_image = mock.Mock(id=1, name="img_2")
        image_service.create_image.return_value = fake_image
        image_service.list_images.return_value = [
            mock.Mock(id=0, name="img_1"),
            fake_image,
            mock.Mock(id=2, name="img_3")]
        properties = {"fakeprop": "fake"}
        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties}
        # Positive case
        images.CreateAndListImage(self.context).run(
            "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_once_with(**call_args)

        # Negative case: image isn't created
        image_service.create_image.return_value = None
        self.assertRaises(exceptions.RallyAssertionError,
                          images.CreateAndListImage(self.context).run,
                          "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_with(**call_args)

        # Negative case: created image not in the list of available images
        image_service.create_image.return_value = mock.Mock(
            id=12, name="img_nameN")
        self.assertRaises(exceptions.RallyAssertionError,
                          images.CreateAndListImage(self.context).run,
                          "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_with(**call_args)
        image_service.list_images.assert_called_with()

    def test_list_images(self):
        image_service = self.mock_image.return_value
        images.ListImages(self.context).run()
        image_service.list_images.assert_called_once_with()

    def test_create_and_delete_image(self):
        image_service = self.mock_image.return_value
        fake_image = fakes.FakeImage(id=1, name="imagexxx")
        image_service.create_image.return_value = fake_image
        properties = {"fakeprop": "fake"}
        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties}

        images.CreateAndDeleteImage(self.context).run(
            "cf", "url", "df", "vs", 0, 0, properties)

        image_service.create_image.assert_called_once_with(**call_args)
        image_service.delete_image.assert_called_once_with(fake_image.id)

    def test_create_and_get_image(self):
        image_service = self.mock_image.return_value
        fake_image = fakes.FakeImage(id=1, name="img_name1")
        image_service.create_image.return_value = fake_image
        fake_image_info = fakes.FakeImage(id=1, name="img_name1",
                                          status="active")
        image_service.get_image.return_value = fake_image_info
        properties = {"fakeprop": "fake"}
        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties}

        # Positive case
        images.CreateAndGetImage(self.context).run(
            "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_once_with(**call_args)
        image_service.get_image.assert_called_once_with(fake_image)

        # Negative case: image isn't created
        image_service.create_image.reset_mock()
        image_service.create_image.return_value = None
        self.assertRaises(exceptions.RallyAssertionError,
                          images.CreateAndGetImage(self.context).run,
                          "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_with(**call_args)

        # Negative case: image obtained in _get_image not the created image
        image_service.create_image.reset_mock()
        image_service.get_image.reset_mock()
        image_service.create_image.return_value = fakes.FakeImage(
            id=12, name="img_nameN")
        self.assertRaises(exceptions.RallyAssertionError,
                          images.CreateAndGetImage(self.context).run,
                          "cf", "url", "df", "vs", 0, 0, properties)
        image_service.create_image.assert_called_with(**call_args)
        image_service.get_image.assert_called_with(
            image_service.create_image.return_value)

    def test_create_and_download_image(self):
        image_service = self.mock_image.return_value
        fake_image = fakes.FakeImage()
        image_service.create_image.return_value = fake_image
        properties = {"fakeprop": "fake"}
        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties}

        images.CreateAndDownloadImage(self.context).run(
            "cf", "url", "df", "vs", 0, 0, properties=properties)

        image_service.create_image.assert_called_once_with(**call_args)
        image_service.download_image.assert_called_once_with(fake_image.id)

    @mock.patch("%s.CreateImageAndBootInstances._boot_servers" % BASE)
    def test_create_image_and_boot_instances(self, mock_boot_servers):
        image_service = self.mock_image.return_value
        fake_image = fakes.FakeImage()
        fake_servers = [mock.Mock() for i in range(5)]
        image_service.create_image.return_value = fake_image
        mock_boot_servers.return_value = fake_servers
        boot_server_kwargs = {"fakeserverarg": "f"}
        properties = {"fakeprop": "fake"}
        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties}

        images.CreateImageAndBootInstances(self.context).run(
            "cf", "url", "df", "fid", 5, visibility="vs", min_disk=0,
            min_ram=0, properties=properties,
            boot_server_kwargs=boot_server_kwargs)
        image_service.create_image.assert_called_once_with(**call_args)
        # NOTE(review): "image-id-0" presumably comes from FakeImage's
        # default id — confirm against tests.unit.fakes.
        mock_boot_servers.assert_called_once_with("image-id-0", "fid", 5,
                                                  **boot_server_kwargs)

    def test_create_and_update_image(self):
        image_service = self.mock_image.return_value
        fake_image = fakes.FakeImage(id=1, name="imagexxx")
        image_service.create_image.return_value = fake_image
        properties = {"fakeprop": "fake"}
        create_args = {"container_format": "cf",
                       "image_location": "url",
                       "disk_format": "df",
                       "visibility": "vs",
                       "min_disk": 0,
                       "min_ram": 0,
                       "properties": properties}

        images.CreateAndUpdateImage(self.context).run(
            "cf", "url", "df", None, "vs", 0, 0, properties, 0, 0)

        image_service.create_image.assert_called_once_with(**create_args)
        image_service.update_image.assert_called_once_with(
            fake_image.id, min_disk=0, min_ram=0, remove_props=None)

    @mock.patch("%s.create_image" % GLANCE_V2_PATH)
    @mock.patch("%s.deactivate_image" % GLANCE_V2_PATH)
    def test_create_and_deactivate_image(self, mock_deactivate_image,
                                         mock_create_image):
        # This scenario talks to the glance v2 service directly, hence the
        # method-level patches instead of the Image facade from setUp.
        fake_image = fakes.FakeImage(id=1, name="img_name1")
        mock_create_image.return_value = fake_image

        call_args = {"container_format": "cf",
                     "image_location": "url",
                     "disk_format": "df",
                     "visibility": "vs",
                     "min_disk": 0,
                     "min_ram": 0}
        images.CreateAndDeactivateImage(self.context).run(
            "cf", "url", "df", "vs", 0, 0)

        mock_create_image.assert_called_once_with(**call_args)
        mock_deactivate_image.assert_called_once_with(fake_image.id)
24,811
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/quotas/test_utils.py
# Copyright 2014: Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.quotas import utils
from tests.unit import test


class QuotasScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the quotas scenario helper methods."""

    @staticmethod
    def _nova_quota_sample():
        # Representative nova quota set shared by the update tests.
        return {
            "metadata_items": 10,
            "key_pairs": 10,
            "injected_file_content_bytes": 1024,
            "injected_file_path_bytes": 1024,
            "ram": 5120,
            "instances": 10,
            "injected_files": 10,
            "cores": 10,
        }

    def test__update_quotas(self):
        project_id = "fake_tenant"
        sample = self._nova_quota_sample()
        self.admin_clients("nova").quotas.update.return_value = sample
        scenario = utils.QuotasScenario(self.context)
        scenario._generate_quota_values = mock.MagicMock(return_value=sample)

        outcome = scenario._update_quotas("nova", project_id)

        self.assertEqual(sample, outcome)
        self.admin_clients("nova").quotas.update.assert_called_once_with(
            project_id, **sample)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "quotas.update_quotas")

    def test__update_quotas_fn(self):
        project_id = "fake_tenant"
        sample = self._nova_quota_sample()
        self.admin_clients("nova").quotas.update.return_value = sample
        scenario = utils.QuotasScenario(self.context)
        scenario._generate_quota_values = mock.MagicMock(return_value=sample)

        # A caller-supplied function may produce the quota values instead
        # of the scenario's own generator.
        outcome = scenario._update_quotas(
            "nova", project_id, quota_update_fn=mock.Mock(return_value=sample))

        self.assertEqual(sample, outcome)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "quotas.update_quotas")

    def test__generate_quota_values_nova(self):
        limit = 1024
        scenario = utils.QuotasScenario(self.context)
        generated = scenario._generate_quota_values(limit, "nova")
        # Every generated value must lie in [-1, limit].
        for value in generated.values():
            self.assertGreaterEqual(value, -1)
            self.assertLessEqual(value, limit)

    def test__generate_quota_values_cinder(self):
        limit = 1024
        scenario = utils.QuotasScenario(self.context)
        generated = scenario._generate_quota_values(limit, "cinder")
        for value in generated.values():
            self.assertGreaterEqual(value, -1)
            self.assertLessEqual(value, limit)

    def test__generate_quota_values_neutron(self):
        limit = 1024
        scenario = utils.QuotasScenario(self.context)
        generated = scenario._generate_quota_values(limit, "neutron")
        # Neutron quotas are nested two levels deep; check every leaf.
        for level_one in generated.values():
            for level_two in level_one.values():
                for leaf in level_two.values():
                    self.assertGreaterEqual(leaf, -1)
                    self.assertLessEqual(leaf, limit)

    def test__delete_quotas(self):
        scenario = utils.QuotasScenario(self.context)
        scenario._delete_quotas("nova", "fake_tenant")
        self.admin_clients("nova").quotas.delete.assert_called_once_with(
            "fake_tenant")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "quotas.delete_quotas")

    def test__get_quotas(self):
        scenario = utils.QuotasScenario(self.context)
        scenario._get_quotas("nova", "fake_tenant")
        self.admin_clients("nova").quotas.get.assert_called_once_with(
            "fake_tenant")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "quotas.get_quotas")
24,812
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/gnocchi/test_archive_policy.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.gnocchi import archive_policy
from tests.unit import test


class GnocchiArchivePolicyTestCase(test.ScenarioTestCase):
    """Unit tests for the Gnocchi archive-policy scenarios."""

    def get_test_context(self):
        # Scenarios under test need admin/user credentials and a tenant.
        context = super(GnocchiArchivePolicyTestCase, self).get_test_context()
        context.update({
            "admin": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "user": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake"}
        })
        return context

    def setUp(self):
        super(GnocchiArchivePolicyTestCase, self).setUp()
        # Replace the whole GnocchiService so no real API calls happen.
        patch = mock.patch(
            "rally_openstack.common.services.gnocchi.metric.GnocchiService")
        self.addCleanup(patch.stop)
        self.mock_metric = patch.start()

    def test_list_archive_policy(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy.ListArchivePolicy(self.context)
        scenario.run()
        metric_service.list_archive_policy.assert_called_once_with()

    def test_create_archive_policy(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy.CreateArchivePolicy(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        attrs = [{"foo": "1:23:24", "bar": "5:43:21"}]
        aggreg = ["foo1", "foo2"]
        scenario.run(definition=attrs, aggregation_methods=aggreg)
        metric_service.create_archive_policy.assert_called_once_with(
            "name", definition=attrs, aggregation_methods=aggreg)

    def test_create_delete_archive_policy(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy.CreateDeleteArchivePolicy(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        attrs = [{"foo": "2:34:55", "bar": "4:32:10"}]
        aggreg = ["foo3", "foo4"]
        scenario.run(definition=attrs, aggregation_methods=aggreg)
        metric_service.create_archive_policy.assert_called_once_with(
            "name", definition=attrs, aggregation_methods=aggreg)
        metric_service.delete_archive_policy.assert_called_once_with(
            "name")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,813
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/senlin/test_profiles.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.contexts.senlin import profiles
from tests.unit import test


BASE_CTX = "rally.task.context"
CTX = "rally_openstack.context"
BASE_SCN = "rally.task.scenarios"
SCN = "rally_openstack.task.scenarios"


class ProfilesGeneratorTestCase(test.ScenarioTestCase):
    """Unit tests for the senlin ProfilesGenerator context."""

    def _gen_tenants(self, count):
        """Return ``count`` fake tenants keyed by stringified index."""
        tenants = {}
        for _id in range(count):
            tenants[str(_id)] = {"id": str(_id)}
        return tenants

    def setUp(self):
        super(ProfilesGeneratorTestCase, self).setUp()
        self.tenants_count = 2
        self.users_per_tenant = 3
        tenants = self._gen_tenants(self.tenants_count)
        users = []
        for tenant in tenants:
            for i in range(self.users_per_tenant):
                users.append({"id": i, "tenant_id": tenant,
                              "credential": mock.MagicMock()})

        self.context = {
            "config": {
                "users": {
                    "tenants": self.tenants_count,
                    "users_per_tenant": self.users_per_tenant
                },
                "profiles": {
                    "type": "profile_type_name",
                    "version": "1.0",
                    "properties": {"k1": "v1", "k2": "v2"}
                },
            },
            "users": users,
            "tenants": tenants,
            "task": mock.MagicMock()
        }

    @mock.patch("%s.senlin.utils.SenlinScenario._create_profile" % SCN,
                return_value=mock.MagicMock(id="TEST_PROFILE_ID"))
    def test_setup(self, mock_senlin_scenario__create_profile):
        profile_ctx = profiles.ProfilesGenerator(self.context)
        profile_ctx.setup()
        spec = self.context["config"]["profiles"]

        # One profile is created per tenant with the configured spec...
        mock_calls = [mock.call(spec) for i in range(self.tenants_count)]
        mock_senlin_scenario__create_profile.assert_has_calls(mock_calls)

        # ...and its id is stored on every tenant.
        for tenant in self.context["tenants"]:
            self.assertEqual("TEST_PROFILE_ID",
                             self.context["tenants"][tenant]["profile"])

    @mock.patch("%s.senlin.utils.SenlinScenario._delete_profile" % SCN)
    def test_cleanup(self, mock_senlin_scenario__delete_profile):
        for tenant in self.context["tenants"]:
            self.context["tenants"][tenant].update(
                {"profile": "TEST_PROFILE_ID"})
        profile_ctx = profiles.ProfilesGenerator(self.context)
        profile_ctx.cleanup()
        mock_calls = [mock.call("TEST_PROFILE_ID") for i in range(
            self.tenants_count)]
        mock_senlin_scenario__delete_profile.assert_has_calls(mock_calls)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,814
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/gnocchi/test_archive_policy_rule.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.gnocchi import archive_policy_rule
from tests.unit import test


class GnocchiArchivePolicyRuleTestCase(test.ScenarioTestCase):
    """Unit tests for the Gnocchi archive-policy-rule scenarios."""

    def get_test_context(self):
        # Scenarios under test need admin/user credentials and a tenant.
        context = super(GnocchiArchivePolicyRuleTestCase,
                        self).get_test_context()
        context.update({
            "admin": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "user": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake"}
        })
        return context

    def setUp(self):
        super(GnocchiArchivePolicyRuleTestCase, self).setUp()
        # Replace the whole GnocchiService so no real API calls happen.
        patch = mock.patch(
            "rally_openstack.common.services.gnocchi.metric.GnocchiService")
        self.addCleanup(patch.stop)
        self.mock_metric = patch.start()

    def test_list_archive_policy_rule(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy_rule.ListArchivePolicyRule(self.context)
        scenario.run()
        metric_service.list_archive_policy_rule.assert_called_once_with()

    def test_create_archive_policy_rule(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy_rule.CreateArchivePolicyRule(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run(metric_pattern="foo_pat*", archive_policy_name="foo_pol")
        metric_service.create_archive_policy_rule.assert_called_once_with(
            "name", metric_pattern="foo_pat*", archive_policy_name="foo_pol")

    def test_create_delete_archive_policy_rule(self):
        metric_service = self.mock_metric.return_value
        scenario = archive_policy_rule.CreateDeleteArchivePolicyRule(
            self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run(metric_pattern="foo_pat*", archive_policy_name="foo_pol")
        metric_service.create_archive_policy_rule.assert_called_once_with(
            "name", metric_pattern="foo_pat*", archive_policy_name="foo_pol")
        metric_service.delete_archive_policy_rule.assert_called_once_with(
            "name")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,815
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/neutron/test_trunk.py
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.neutron import trunk
from tests.unit import test


class NeutronTrunkTestCase(test.ScenarioTestCase):
    """Unit tests for the Neutron trunk-port scenarios."""

    def test_create_and_list_trunks(self):
        subport_count = 10
        network_create_args = {}
        net = mock.MagicMock()
        scenario = trunk.CreateAndListTrunks(self.context)
        scenario._create_network = mock.Mock(return_value=net)
        scenario._create_port = mock.MagicMock()
        scenario._create_trunk = mock.MagicMock()
        scenario._list_subports_by_trunk = mock.MagicMock()
        scenario._update_port = mock.Mock()
        scenario.run(network_create_args=network_create_args,
                     subport_count=subport_count)
        scenario._create_network.assert_called_once_with(
            network_create_args)
        # One parent port plus one port per subport.
        scenario._create_port.assert_has_calls(
            [mock.call(net, {}) for _ in range(subport_count + 1)])
        self.assertEqual(1, scenario._create_trunk.call_count)
        self.assertEqual(1, scenario._list_subports_by_trunk.call_count)

    def test_boot_server_with_subports(self):
        img_name = "img"
        flavor_uuid = 0
        subport_count = 10
        network_create_args = {}
        net = mock.MagicMock()
        port = {"port": {"id": "port-id"}}
        kwargs = {"nics": [{"port-id": "port-id"}]}
        subnet = {"subnet": {"id": "subnet-id"}}
        scenario = trunk.BootServerWithSubports(self.context)
        scenario._boot_server = mock.MagicMock()
        scenario._create_port = mock.MagicMock(return_value=port)
        scenario._create_trunk = mock.MagicMock()
        scenario._create_network_and_subnets = mock.MagicMock()
        scenario._create_network_and_subnets.return_value = net, [subnet]
        scenario.run(img_name, flavor_uuid,
                     network_create_args=network_create_args,
                     subport_count=subport_count)
        scenario._create_port.assert_has_calls(
            [mock.call(net, {"fixed_ips": [{"subnet_id":
                                            subnet["subnet"]["id"]}]})
             for _ in range(subport_count + 1)])
        self.assertEqual(1, scenario._create_trunk.call_count)
        # A network is created for the parent port and for each subport.
        self.assertEqual(11, scenario._create_network_and_subnets.call_count)
        scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,
                                                      **kwargs)

    def test_boot_server_and_add_subports(self):
        img_name = "img"
        flavor_uuid = 0
        subport_count = 10
        network_create_args = {}
        net = mock.MagicMock()
        port = {"port": {"id": "port-id"}}
        kwargs = {"nics": [{"port-id": "port-id"}]}
        subnet = {"subnet": {"id": "subnet-id"}}
        scenario = trunk.BootServerAndAddSubports(self.context)
        scenario._boot_server = mock.MagicMock()
        scenario._create_port = mock.MagicMock(return_value=port)
        scenario._create_trunk = mock.MagicMock()
        scenario._add_subports_to_trunk = mock.MagicMock()
        scenario._create_network_and_subnets = mock.MagicMock()
        scenario._create_network_and_subnets.return_value = net, [subnet]
        scenario.run(img_name, flavor_uuid,
                     network_create_args=network_create_args,
                     subport_count=subport_count)
        scenario._create_port.assert_has_calls(
            [mock.call(net, {"fixed_ips": [{"subnet_id":
                                            subnet["subnet"]["id"]}]})
             for _ in range(subport_count + 1)])
        self.assertEqual(1, scenario._create_trunk.call_count)
        scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,
                                                      **kwargs)
        # Subports are added one at a time after the server is booted.
        self.assertEqual(10, scenario._add_subports_to_trunk.call_count)
        self.assertEqual(11, scenario._create_network_and_subnets.call_count)

    def test_boot_server_and_batch_add_subports(self):
        img_name = "img"
        flavor_uuid = 0
        subports_per_batch = 10
        batches = 5
        network_create_args = {}
        net = mock.MagicMock()
        port = {"port": {"id": "port-id"}}
        kwargs = {"nics": [{"port-id": "port-id"}]}
        subnet = {"subnet": {"id": "subnet-id"}}
        scenario = trunk.BootServerAndBatchAddSubports(self.context)
        scenario._boot_server = mock.MagicMock()
        scenario._create_port = mock.MagicMock(return_value=port)
        scenario._create_trunk = mock.MagicMock()
        scenario._add_subports_to_trunk = mock.MagicMock()
        scenario._create_network_and_subnets = mock.MagicMock()
        scenario._create_network_and_subnets.return_value = net, [subnet]
        scenario.run(img_name, flavor_uuid,
                     network_create_args=network_create_args,
                     subports_per_batch=10, batches=5)
        scenario._create_port.assert_has_calls(
            [mock.call(net, {"fixed_ips": [{"subnet_id":
                                            subnet["subnet"]["id"]}]})
             for _ in range(subports_per_batch * batches + 1)])
        self.assertEqual(1, scenario._create_trunk.call_count)
        scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,
                                                      **kwargs)
        # One _add_subports_to_trunk call per batch.
        self.assertEqual(5, scenario._add_subports_to_trunk.call_count)
        self.assertEqual(51, scenario._create_network_and_subnets.call_count)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,816
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/image/glance_v2.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from rally.common import cfg
from rally.common import utils as rutils
from rally.task import atomic
from rally.task import utils
import requests

from rally_openstack.common import service
from rally_openstack.common.services.image import glance_common
from rally_openstack.common.services.image import image


CONF = cfg.CONF


@service.service("glance", service_type="image", version="2")
class GlanceV2Service(service.Service, glance_common.GlanceMixin):
    """Direct wrapper around the Glance v2 images API."""

    @atomic.action_timer("glance_v2.upload_data")
    def upload_data(self, image_id, image_location):
        """Upload the data for an image.

        :param image_id: Image ID to upload data to.
        :param image_location: Location of the data to upload to.
        """
        image_location = os.path.expanduser(image_location)
        image_data = None
        response = None
        try:
            if os.path.isfile(image_location):
                image_data = open(image_location, "rb")
            else:
                # NOTE(review): TLS certificate verification is disabled for
                # the remote download; acceptable only for benchmark images
                # from trusted locations — do not reuse for untrusted URLs.
                response = requests.get(image_location, stream=True,
                                        verify=False)
                image_data = response.raw
            self._clients.glance("2").images.upload(image_id, image_data)
        finally:
            # Close both the file/raw stream and the HTTP response, whichever
            # were opened, even if the upload raised.
            if image_data is not None:
                image_data.close()
            if response is not None:
                response.close()

    @atomic.action_timer("glance_v2.create_image")
    def create_image(self, image_name=None, container_format=None,
                     image_location=None, disk_format=None,
                     visibility=None, min_disk=0,
                     min_ram=0, properties=None):
        """Creates new image.

        :param image_name: Image name for which need to be created
        :param container_format: Container format
        :param image_location: The new image's location
        :param disk_format: Disk format
        :param visibility: The created image's visible status.
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: Dict of image properties
        """
        image_name = image_name or self.generate_random_name()
        properties = properties or {}
        image_obj = self._clients.glance("2").images.create(
            name=image_name,
            container_format=container_format,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            **properties)

        rutils.interruptable_sleep(CONF.openstack.
                                   glance_image_create_prepoll_delay)

        start = time.time()
        image_obj = utils.wait_for_status(
            image_obj.id, ["queued"],
            update_resource=self.get_image,
            timeout=CONF.openstack.glance_image_create_timeout,
            check_interval=CONF.openstack.glance_image_create_poll_interval)
        # NOTE(review): the elapsed time of the "queued" wait is reused as
        # the timeout for the "active" wait below — looks suspicious (a very
        # fast "queued" phase yields a near-zero timeout); confirm against
        # the intended semantics before changing.
        timeout = time.time() - start

        self.upload_data(image_obj.id, image_location=image_location)

        image_obj = utils.wait_for_status(
            image_obj, ["active"],
            update_resource=self.get_image,
            timeout=timeout,
            check_interval=CONF.openstack.glance_image_create_poll_interval)
        return image_obj

    @atomic.action_timer("glance_v2.update_image")
    def update_image(self, image_id, image_name=None, min_disk=0,
                     min_ram=0, remove_props=None):
        """Update image.

        :param image_id: ID of image to update
        :param image_name: Image name to be updated to
        :param min_disk: The min disk of updated image
        :param min_ram: The min ram of updated image
        :param remove_props: List of property names to remove
        """
        image_name = image_name or self.generate_random_name()

        return self._clients.glance("2").images.update(
            image_id=image_id,
            name=image_name,
            min_disk=min_disk,
            min_ram=min_ram,
            remove_props=remove_props)

    @atomic.action_timer("glance_v2.list_images")
    def list_images(self, status="active", visibility=None, owner=None):
        """List images.

        :param status: Filter in images for the specified status
        :param visibility: Filter in images for the specified visibility
        :param owner: Filter in images for tenant ID
        """
        filters = {}
        filters["status"] = status
        if visibility:
            filters["visibility"] = visibility
        if owner:
            filters["owner"] = owner
        # NOTE(boris-42): image.list() is lazy method which doesn't query API
        #                 until it's used, do not remove list().
        return list(self._clients.glance("2").images.list(filters=filters))

    @atomic.action_timer("glance_v2.set_visibility")
    def set_visibility(self, image_id, visibility="shared"):
        """Update visibility.

        :param image_id: ID of image to update
        :param visibility: The visibility of specified image
        """
        self._clients.glance("2").images.update(image_id,
                                                visibility=visibility)

    @atomic.action_timer("glance_v2.deactivate_image")
    def deactivate_image(self, image_id):
        """deactivate image."""
        self._clients.glance("2").images.deactivate(image_id)

    @atomic.action_timer("glance_v2.reactivate_image")
    def reactivate_image(self, image_id):
        """reactivate image."""
        self._clients.glance("2").images.reactivate(image_id)


@service.compat_layer(GlanceV2Service)
class UnifiedGlanceV2Service(glance_common.UnifiedGlanceMixin, image.Image):
    """Compatibility layer for Glance V2."""

    @staticmethod
    def _check_v2_visibility(visibility):
        # Reject values that Glance v2 does not understand before calling it.
        visibility_values = ["public", "private", "shared", "community"]
        if visibility and visibility not in visibility_values:
            raise image.VisibilityException(
                message="Improper visibility value: %s in glance_v2"
                        % visibility)

    def create_image(self, image_name=None, container_format=None,
                     image_location=None, disk_format=None,
                     visibility=None, min_disk=0,
                     min_ram=0, properties=None):
        """Creates new image.

        :param image_name: Image name for which need to be created
        :param container_format: Container format
        :param image_location: The new image's location
        :param disk_format: Disk format
        :param visibility: The access permission for the created image.
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: Dict of image properties
        """
        image_obj = self._impl.create_image(
            image_name=image_name,
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        return self._unify_image(image_obj)

    def update_image(self, image_id, image_name=None, min_disk=0,
                     min_ram=0, remove_props=None):
        """Update image.

        :param image_id: ID of image to update
        :param image_name: Image name to be updated to
        :param min_disk: The min disk of updated image
        :param min_ram: The min ram of updated image
        :param remove_props: List of property names to remove
        """
        image_obj = self._impl.update_image(
            image_id=image_id,
            image_name=image_name,
            min_disk=min_disk,
            min_ram=min_ram,
            remove_props=remove_props)
        return self._unify_image(image_obj)

    def list_images(self, status="active", visibility=None, owner=None):
        """List images.

        :param status: Filter in images for the specified status
        :param visibility: Filter in images for the specified visibility
        :param owner: Filter in images for tenant ID
        """
        self._check_v2_visibility(visibility)

        images = self._impl.list_images(
            status=status, visibility=visibility, owner=owner)
        return [self._unify_image(i) for i in images]

    def set_visibility(self, image_id, visibility="shared"):
        """Update visibility.

        :param image_id: ID of image to update
        :param visibility: The visibility of specified image
        """
        self._check_v2_visibility(visibility)

        self._impl.set_visibility(image_id=image_id, visibility=visibility)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,817
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/quotas/quotas.py
# Copyright 2014: Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.quotas import utils


"""Scenarios for quotas."""


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]},
                    name="Quotas.nova_update", platform="openstack")
class NovaUpdate(utils.QuotasScenario):

    def run(self, max_quota=1024):
        """Update quotas for Nova.

        :param max_quota: Max value to be updated for quota.
        """
        self._update_quotas("nova", self.context["tenant"]["id"],
                            max_quota)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]},
                    name="Quotas.nova_update_and_delete",
                    platform="openstack")
class NovaUpdateAndDelete(utils.QuotasScenario):

    def run(self, max_quota=1024):
        """Update and delete quotas for Nova.

        :param max_quota: Max value to be updated for quota.
        """
        self._update_quotas("nova", self.context["tenant"]["id"],
                            max_quota)
        self._delete_quotas("nova", self.context["tenant"]["id"])


@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]},
                    name="Quotas.cinder_update", platform="openstack")
class CinderUpdate(utils.QuotasScenario):

    def run(self, max_quota=1024):
        """Update quotas for Cinder.

        :param max_quota: Max value to be updated for quota.
        """
        self._update_quotas("cinder", self.context["tenant"]["id"],
                            max_quota)


@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]},
                    name="Quotas.cinder_get", platform="openstack")
class CinderGet(utils.QuotasScenario):

    def run(self):
        """Get quotas for Cinder.

        Measure the "cinder quota-show" command performance
        """
        self._get_quotas("cinder", self.context["tenant"]["id"])


@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder.quotas"]},
                    name="Quotas.cinder_update_and_delete",
                    platform="openstack")
class CinderUpdateAndDelete(utils.QuotasScenario):

    def run(self, max_quota=1024):
        """Update and Delete quotas for Cinder.

        :param max_quota: Max value to be updated for quota.
        """
        self._update_quotas("cinder", self.context["tenant"]["id"],
                            max_quota)
        self._delete_quotas("cinder", self.context["tenant"]["id"])


@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["neutron.quota"]},
                    name="Quotas.neutron_update", platform="openstack")
class NeutronUpdate(utils.QuotasScenario):

    def run(self, max_quota=1024):
        """Update quotas for neutron.

        :param max_quota: Max value to be updated for quota.
        """
        # Neutron quotas go through the client's update_quota callable
        # rather than a quotas manager, hence the explicit function.
        quota_update_fn = self.admin_clients("neutron").update_quota
        self._update_quotas("neutron", self.context["tenant"]["id"],
                            max_quota, quota_update_fn)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["nova.quotas"]},
                    name="Quotas.nova_get", platform="openstack")
class NovaGet(utils.QuotasScenario):

    def run(self):
        """Get quotas for nova."""
        self._get_quotas("nova", self.context["tenant"]["id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,818
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/murano/packages.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from rally.task import types
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.murano import utils


"""Scenarios for Murano packages."""


@types.convert(package={"type": "expand_user_path"})
@validation.add("file_exists", param_name="package", mode=os.F_OK)
@validation.add("required_services", services=[consts.Service.MURANO])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["murano.packages"]},
                    name="MuranoPackages.import_and_list_packages",
                    platform="openstack")
class ImportAndListPackages(utils.MuranoScenario):

    def run(self, package, include_disabled=False):
        """Import Murano package and get list of packages.

        Measure the "murano import-package" and "murano package-list"
        commands performance.
        It imports Murano package from "package" (if it is not a zip archive
        then zip archive will be prepared) and gets list of imported packages.

        :param package: path to zip archive that represents Murano
                        application package or absolute path to folder with
                        package components
        :param include_disabled: specifies whether the disabled packages will
                                 be included in a the result or not.
                                 Default value is False.
        """
        package_path = self._zip_package(package)
        try:
            self._import_package(package_path)
            self._list_packages(include_disabled=include_disabled)
        finally:
            # The temporary zip archive is always removed, even on failure.
            os.remove(package_path)


@types.convert(package={"type": "expand_user_path"})
@validation.add("file_exists", param_name="package", mode=os.F_OK)
@validation.add("required_services", services=[consts.Service.MURANO])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["murano.packages"]},
                    name="MuranoPackages.import_and_delete_package",
                    platform="openstack")
class ImportAndDeletePackage(utils.MuranoScenario):

    def run(self, package):
        """Import Murano package and then delete it.

        Measure the "murano import-package" and "murano package-delete"
        commands performance.
        It imports Murano package from "package" (if it is not a zip archive
        then zip archive will be prepared) and deletes it.

        :param package: path to zip archive that represents Murano
                        application package or absolute path to folder with
                        package components
        """
        package_path = self._zip_package(package)
        try:
            package = self._import_package(package_path)
            self._delete_package(package)
        finally:
            os.remove(package_path)


@types.convert(package={"type": "expand_user_path"})
@validation.add("file_exists", param_name="package", mode=os.F_OK)
@validation.add("required_services", services=[consts.Service.MURANO])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["murano.packages"]},
                    name="MuranoPackages.package_lifecycle",
                    platform="openstack")
class PackageLifecycle(utils.MuranoScenario):

    def run(self, package, body, operation="replace"):
        """Import Murano package, modify it and then delete it.

        Measure the Murano import, update and delete package
        commands performance.
        It imports Murano package from "package" (if it is not a zip archive
        then zip archive will be prepared), modifies it (using data from
        "body") and deletes.

        :param package: path to zip archive that represents Murano
                        application package or absolute path to folder with
                        package components
        :param body: dict object that defines what package property will be
                     updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
        :param operation: string object that defines the way of how package
                          property will be updated, allowed operations are
                          "add", "replace" or "delete".
                          Default value is "replace".
        """
        package_path = self._zip_package(package)
        try:
            package = self._import_package(package_path)
            self._update_package(package, body, operation)
            self._delete_package(package)
        finally:
            os.remove(package_path)


@types.convert(package={"type": "expand_user_path"})
@validation.add("file_exists", param_name="package", mode=os.F_OK)
@validation.add("required_services", services=[consts.Service.MURANO])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["murano.packages"]},
                    name="MuranoPackages.import_and_filter_applications",
                    platform="openstack")
class ImportAndFilterApplications(utils.MuranoScenario):

    def run(self, package, filter_query):
        """Import Murano package and then filter packages by some criteria.

        Measure the performance of package import and package
        filtering commands.
        It imports Murano package from "package" (if it is not a zip archive
        then zip archive will be prepared) and filters packages by some
        criteria.

        :param package: path to zip archive that represents Murano
                        application package or absolute path to folder with
                        package components
        :param filter_query: dict that contains filter criteria, lately it
                             will be passed as **kwargs to filter method
                             e.g. {"category": "Web"}
        """
        package_path = self._zip_package(package)
        try:
            self._import_package(package_path)
            self._filter_applications(filter_query)
        finally:
            os.remove(package_path)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,819
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/monasca/test_metrics.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.contexts.monasca import metrics
from rally_openstack.task.scenarios.monasca import utils as monasca_utils
from tests.unit import test

# Module path prefix used to build mock.patch targets for the context under
# test.
CTX = "rally_openstack.task.contexts.monasca"


class MonascaMetricGeneratorTestCase(test.TestCase):
    """Unit tests for the monasca_metrics task context."""

    def _gen_tenants(self, count):
        """Build ``count`` fake tenants keyed/named by their index.

        :param count: number of tenants to generate
        :returns: dict mapping stringified index -> {"name": index}
        """
        tenants = {}
        # NOTE(review): loop variable shadows the ``id`` builtin; kept as-is
        # since this is a byte-identical documentation pass.
        for id in range(count):
            tenants[str(id)] = {"name": str(id)}
        return tenants

    def _gen_context(self, tenants_count, users_per_tenant,
                     metrics_per_tenant):
        """Assemble a full task context dict for the metrics generator.

        :param tenants_count: number of fake tenants
        :param users_per_tenant: fake users created per tenant
        :param metrics_per_tenant: value passed through to the
            ``monasca_metrics`` context config
        :returns: tuple (tenants dict, context dict)
        """
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id in tenants.keys():
            for i in range(users_per_tenant):
                # User ids repeat across tenants (0..users_per_tenant-1);
                # uniqueness is not needed by the code under test.
                users.append({"id": i,
                              "tenant_id": id,
                              "endpoint": mock.MagicMock()})
        context = test.get_test_context()
        context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "monasca_metrics": {
                    "name": "fake-metric-name",
                    "dimensions": {
                        "region": "fake-region",
                        "service": "fake-identity",
                        "hostname": "fake-hostname",
                        "url": "fake-url"
                    },
                    "metrics_per_tenant": metrics_per_tenant,
                },
                "roles": [
                    "monasca-user"
                ]
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        return tenants, context

    @mock.patch("%s.metrics.rutils.interruptable_sleep" % CTX)
    @mock.patch("%s.metrics.monasca_utils.MonascaScenario" % CTX)
    def test_setup(self, mock_monasca_scenario, mock_interruptable_sleep):
        """setup() creates one scenario per tenant and the right sleeps.

        Verifies that MonascaMetricGenerator.setup() constructs a scenario
        per tenant, creates metrics_per_tenant metrics for each, and sleeps
        in the expected order (a 0.001s sleep per metric, then one final
        prepoll-delay sleep).
        """
        tenants_count = 2
        users_per_tenant = 4
        metrics_per_tenant = 5
        tenants, real_context = self._gen_context(
            tenants_count, users_per_tenant, metrics_per_tenant)
        monasca_ctx = metrics.MonascaMetricGenerator(real_context)
        monasca_ctx.setup()
        self.assertEqual(tenants_count, mock_monasca_scenario.call_count,
                         "Scenario should be constructed same times as "
                         "number of tenants")
        self.assertEqual(metrics_per_tenant * tenants_count,
                         mock_monasca_scenario.return_value._create_metrics.
                         call_count,
                         "Total number of metrics created should be tenant"
                         "counts times metrics per tenant")
        # The exact ordering of sleep calls is asserted below: one short
        # sleep per created metric, followed by a single prepoll delay.
        first_call = mock.call(0.001)
        second_call = mock.call(monasca_utils.CONF.openstack.
                                monasca_metric_create_prepoll_delay,
                                atomic_delay=1)
        self.assertEqual(
            [first_call] * metrics_per_tenant * tenants_count + [second_call],
            mock_interruptable_sleep.call_args_list,
            "Method interruptable_sleep should be called tenant counts times "
            "metrics plus one")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,820
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/nova/server_groups.py
# Copyright 2017: Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils


LOG = logging.getLogger(__name__)


"""Scenarios for Nova Group servers."""


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServerGroups.create_and_list_server_groups",
                    platform="openstack")
class CreateAndListServerGroups(utils.NovaScenario):

    def run(self, policies=None, all_projects=False, kwargs=None):
        """Create a server group and verify it appears in the listing.

        Benchmarks the "nova server-group-create" and
        "nova server-group-list" commands.

        :param policies: Server group policy
        :param all_projects: If True, display server groups from all
                             projects (Admin only)
        :param kwargs: The server group specifications to add. DEPRECATED,
                       specify arguments explicitly.
        """
        # Honour the deprecated ``kwargs`` bag when supplied; otherwise
        # build the create arguments from the explicit parameters.
        if kwargs is not None:
            LOG.warning("The argument `kwargs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
        else:
            kwargs = {"policies": policies}
        group = self._create_server_group(**kwargs)
        self.assertTrue(group, err_msg="Server Groups isn't created")
        listed_groups = self._list_server_groups(all_projects)
        not_listed_msg = (
            "Server Group not included into list of server groups\n"
            "Created server group: {}\n"
            "list of server groups: {}").format(group, listed_groups)
        self.assertIn(group, listed_groups, err_msg=not_listed_msg)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServerGroups.create_and_get_server_group",
                    platform="openstack")
class CreateAndGetServerGroup(utils.NovaScenario):

    def run(self, policies=None, kwargs=None):
        """Create a server group and fetch its detailed information.

        Benchmarks the "nova server-group-create" and
        "nova server-group-get" commands.

        :param policies: Server group policy
        :param kwargs: The server group specifications to add. DEPRECATED,
                       specify arguments explicitly.
        """
        # Honour the deprecated ``kwargs`` bag when supplied; otherwise
        # build the create arguments from the explicit parameters.
        if kwargs is not None:
            LOG.warning("The argument `kwargs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
        else:
            kwargs = {"policies": policies}
        group = self._create_server_group(**kwargs)
        self.assertTrue(group, err_msg="Server Groups isn't created")
        group_details = self._get_server_group(group.id)
        self.assertEqual(group.id, group_details.id)


@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServerGroups.create_and_delete_server_group",
                    platform="openstack")
class CreateAndDeleteServerGroup(utils.NovaScenario):

    def run(self, policies=None, kwargs=None):
        """Create a server group, then delete it.

        Benchmarks the "nova server-group-create" and
        "nova server-group-delete" commands.

        :param policies: Server group policy
        :param kwargs: The server group specifications to add. DEPRECATED,
                       specify arguments explicitly.
        """
        # Honour the deprecated ``kwargs`` bag when supplied; otherwise
        # build the create arguments from the explicit parameters.
        if kwargs is not None:
            LOG.warning("The argument `kwargs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
        else:
            kwargs = {"policies": policies}
        group = self._create_server_group(**kwargs)
        self.assertTrue(group, err_msg="Server Group isn't created")
        self._delete_server_group(group.id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,821
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/image/glance_v1.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from rally.common import cfg
from rally.common import utils as rutils
from rally.task import atomic
from rally.task import utils

from rally_openstack.common import service
from rally_openstack.common.services.image import glance_common
from rally_openstack.common.services.image import image


CONF = cfg.CONF


@service.service("glance", service_type="image", version="1")
class GlanceV1Service(service.Service, glance_common.GlanceMixin):
    """Thin wrapper around the Glance v1 images API with atomic timers."""

    @atomic.action_timer("glance_v1.create_image")
    def create_image(self, image_name=None, container_format=None,
                     image_location=None, disk_format=None,
                     is_public=True, min_disk=0, min_ram=0,
                     properties=None):
        """Creates new image.

        :param image_name: Image name for which need to be created
        :param container_format: Container format
        :param image_location: The new image's location
        :param disk_format: Disk format
        :param is_public: The created image's public status
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: Dict of image properties
        """
        image_location = os.path.expanduser(image_location)
        image_name = image_name or self.generate_random_name()
        kwargs = {}
        try:
            # Local files are uploaded as a stream; anything else is treated
            # as a URL Glance should copy from.
            if os.path.isfile(image_location):
                kwargs["data"] = open(image_location, "rb")
            else:
                kwargs["copy_from"] = image_location

            image_obj = self._clients.glance("1").images.create(
                name=image_name,
                container_format=container_format,
                disk_format=disk_format,
                is_public=is_public,
                min_disk=min_disk,
                min_ram=min_ram,
                properties=properties,
                **kwargs)

            # Give Glance a head start before polling for the image status.
            rutils.interruptable_sleep(CONF.openstack.
                                       glance_image_create_prepoll_delay)

            image_obj = utils.wait_for_status(
                image_obj, ["active"],
                update_resource=self.get_image,
                timeout=CONF.openstack.glance_image_create_timeout,
                check_interval=CONF.openstack.glance_image_create_poll_interval
            )
        finally:
            # Always release the upload file handle, even when create or the
            # status wait raised.
            if "data" in kwargs:
                kwargs["data"].close()

        return image_obj

    @atomic.action_timer("glance_v1.update_image")
    def update_image(self, image_id, image_name=None, min_disk=0,
                     min_ram=0):
        """Update image.

        :param image_id: ID of image to update
        :param image_name: Image name to be updated to
        :param min_disk: The min disk of updated image
        :param min_ram: The min ram of updated image
        """
        image_name = image_name or self.generate_random_name()

        return self._clients.glance("1").images.update(image_id,
                                                       name=image_name,
                                                       min_disk=min_disk,
                                                       min_ram=min_ram)

    @atomic.action_timer("glance_v1.list_images")
    def list_images(self, status="active", is_public=None, owner=None):
        """List images.

        :param status: Filter in images for the specified status
        :param is_public: Filter in images for the specified public status
        :param owner: Filter in images for tenant ID
        """
        # NOTE(boris-42): image.list() is lazy method which doesn't query API
        #                 until it's used, do not remove list().
        return list(self._clients.glance("1").images.list(status=status,
                                                          owner=owner,
                                                          is_public=is_public))

    @atomic.action_timer("glance_v1.set_visibility")
    def set_visibility(self, image_id, is_public=True):
        """Update visibility.

        :param image_id: ID of image to update
        :param is_public: Image is public or not
        """
        self._clients.glance("1").images.update(image_id, is_public=is_public)


@service.compat_layer(GlanceV1Service)
class UnifiedGlanceV1Service(glance_common.UnifiedGlanceMixin, image.Image):
    """Compatibility layer for Glance V1."""

    @staticmethod
    def _check_v1_visibility(visibility):
        """Raise if ``visibility`` is not expressible in the v1 API.

        Glance v1 only knows public/private; "shared"/"community" etc. from
        v2 cannot be mapped. ``None`` is accepted (means "unspecified").
        """
        visibility_values = ["public", "private"]
        if visibility and visibility not in visibility_values:
            raise image.VisibilityException(
                message="Improper visibility value: %s in glance_v1"
                        % visibility)

    def create_image(self, image_name=None, container_format=None,
                     image_location=None, disk_format=None,
                     visibility="public", min_disk=0,
                     min_ram=0, properties=None):
        """Creates new image.

        :param image_name: Image name for which need to be created
        :param container_format: Container format
        :param image_location: The new image's location
        :param disk_format: Disk format
        :param visibility: The created image's visible status
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: Dict of image properties
        """
        self._check_v1_visibility(visibility)

        is_public = visibility != "private"
        image_obj = self._impl.create_image(
            image_name=image_name,
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            is_public=is_public,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        return self._unify_image(image_obj)

    def update_image(self, image_id, image_name=None, min_disk=0,
                     min_ram=0, remove_props=None):
        """Update image.

        :param image_id: ID of image to update
        :param image_name: Image name to be updated to
        :param min_disk: The min disk of updated image
        :param min_ram: The min ram of updated image
        :param remove_props: List of property names to remove
        :raises image.RemovePropsException: always when ``remove_props`` is
            given — property removal is not supported by the v1 API.
        """
        if remove_props is not None:
            raise image.RemovePropsException("Remove prop: %s is not "
                                             "supported in "
                                             "glance_v1" % remove_props)
        image_obj = self._impl.update_image(
            image_id=image_id,
            image_name=image_name,
            min_disk=min_disk,
            min_ram=min_ram)
        return self._unify_image(image_obj)

    def list_images(self, status="active", visibility=None, owner=None):
        """List images.

        :param status: Filter in images for the specified status
        :param visibility: Filter in images for the specified visibility
        :param owner: Filter in images for tenant ID
        """
        self._check_v1_visibility(visibility)

        # NOTE(review): when visibility is None this maps to is_public=True,
        # i.e. unspecified visibility filters to public images only — kept
        # for backward compatibility, but confirm this is intended.
        is_public = visibility != "private"
        # Bug fix: ``owner`` was accepted and documented but silently
        # dropped; forward it to the implementation like ``status`` and
        # ``is_public``.
        images = self._impl.list_images(status=status, is_public=is_public,
                                        owner=owner)
        return [self._unify_image(i) for i in images]

    def set_visibility(self, image_id, visibility="public"):
        """Update visibility.

        :param image_id: ID of image to update
        :param visibility: The visibility of specified image
        """
        self._check_v1_visibility(visibility)

        is_public = visibility != "private"
        self._impl.set_visibility(image_id=image_id, is_public=is_public)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,822
openstack/rally-openstack
refs/heads/master
/tests/unit/fakes.py
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import itertools
import multiprocessing
import random
import re
import string
from unittest import mock
import uuid

from glanceclient import exc
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exceptions
from swiftclient import exceptions as swift_exceptions

from rally import api
from rally.common import utils as rally_utils
from rally.task import context
from rally.task import scenario

from rally_openstack.common import consts
from rally_openstack.common import credential


def generate_uuid():
    # Random UUID4 rendered as a string; used as default ids/names below.
    return str(uuid.uuid4())


def generate_name(prefix="", length=12, choices=string.ascii_lowercase):
    """Generate pseudo-random name.

    :param prefix: str, custom prefix for generated name
    :param length: int, length of autogenerated part of result name
    :param choices: str, chars that occur in generated name
    :returns: str, pseudo-random name
    """
    return prefix + "".join(random.choice(choices) for i in range(length))


def generate_mac():
    """Generate pseudo-random MAC address.

    :returns: str, MAC address
    """
    rand_str = generate_name(choices="0123456789abcdef", length=12)
    return ":".join(re.findall("..", rand_str))


def setup_dict(data, required=None, defaults=None):
    """Set up and validate a dict based on mandatory keys and default data.

    This function reduces code that constructs dict objects with specific
    schema (e.g. for API data).

    :param data: dict, input data
    :param required: list, mandatory keys to check
    :param defaults: dict, default data
    :returns: dict, with all keys set
    :raises IndexError, ValueError: If input data is incorrect
    """
    required = required or []
    for i in set(required) - set(data):
        raise IndexError("Missed: %s" % i)

    defaults = defaults or {}
    for i in set(data) - set(required) - set(defaults):
        raise ValueError("Unexpected: %s" % i)

    # NOTE(review): the caller-supplied ``defaults`` dict is mutated in
    # place here; callers should not reuse it across calls.
    defaults.update(data)
    return defaults


class FakeCredential(credential.OpenStackCredential):
    # Credential stub with sane defaults and a mocked ``clients`` attribute.

    def __init__(self, **creds):
        creds.setdefault("auth_url", "https://example.com")
        creds.setdefault("username", "admin")
        creds.setdefault("password", "pass")
        super(FakeCredential, self).__init__(**creds)
        self.clients = mock.Mock()


class FakeResource(object):
    """Base fake for OpenStack resources managed by a FakeManager."""

    def __init__(self, manager=None, name=None, status="ACTIVE", items=None,
                 deployment_uuid=None, id=None):
        self.name = name or generate_uuid()
        self.status = status
        self.manager = manager
        self.uuid = generate_uuid()
        self.id = id or self.uuid
        self.items = items or {}
        self.deployment_uuid = deployment_uuid or generate_uuid()

    def __getattr__(self, name):
        # NOTE(msdubov): e.g. server.delete() -> manager.delete(server)
        def manager_func(*args, **kwargs):
            return getattr(self.manager, name)(self, *args, **kwargs)
        return manager_func

    def __getitem__(self, key):
        # Dict-style access is proxied to the ``items`` mapping.
        return self.items[key]


class FakeServer(FakeResource):

    def suspend(self):
        self.status = "SUSPENDED"

    def lock(self):
        setattr(self, "OS-EXT-STS:locked", True)

    def unlock(self):
        setattr(self, "OS-EXT-STS:locked", False)


class FakeImage(FakeResource):

    def __init__(self, manager=None, id="image-id-0", min_ram=0,
                 size=0, min_disk=0, status="active", name=None):
        super(FakeImage, self).__init__(manager, id=id, name=name)
        self.min_ram = min_ram
        self.size = size
        self.min_disk = min_disk
        self.status = status
        self.update = mock.MagicMock()


class FakeStrategy(FakeResource):
    pass


class FakeGoal(FakeResource):
    pass


class FakeMurano(FakeResource):
    pass


class FakeFloatingIP(FakeResource):
    pass


class FakeFloatingIPPool(FakeResource):
    pass


class FakeTenant(FakeResource):

    def __init__(self, manager, name):
        super(FakeTenant, self).__init__(manager, name=name)


class FakeUser(FakeResource):
    pass


class FakeService(FakeResource):
    pass


class FakeNetwork(FakeResource):
    pass


class FakeFlavor(FakeResource):

    def __init__(self, id="flavor-id-0", manager=None, ram=0, disk=0,
                 vcpus=1, name="flavor-name-0"):
        super(FakeFlavor, self).__init__(manager, id=id)
        self.ram = ram
        self.disk = disk
        self.vcpus = vcpus
        self.name = name


class FakeSecret(FakeResource):

    # NOTE(review): the ``name`` argument is accepted but never used; the
    # instance keeps the random name from FakeResource. Confirm intended.
    def __init__(self, id="secret-id-0", manager=None,
                 secret_ref="secret_ref", name="secret-name-0"):
        super(FakeSecret, self).__init__(manager, id=id)
        self.secret_ref = secret_ref


class FakeLoadBalancer(FakeResource):
    pass


class FakeKeypair(FakeResource):
    pass


class FakeStack(FakeResource):
    pass


class FakeDomain(FakeResource):
    pass


class FakeQuotas(FakeResource):
    pass


class FakeSecurityGroup(FakeResource):

    def __init__(self, manager=None, rule_manager=None, id=None, name=None):
        super(FakeSecurityGroup, self).__init__(manager, id=id, name=name)
        self.rule_manager = rule_manager

    @property
    def rules(self):
        # Only rules that belong to this group, resolved lazily from the
        # shared rule manager.
        return [rule for rule in self.rule_manager.list()
                if rule.parent_group_id == self.id]


class FakeSecurityGroupRule(FakeResource):

    def __init__(self, name, **kwargs):
        super(FakeSecurityGroupRule, self).__init__(name)
        # Mimic the nova API which nests ``cidr`` under ``ip_range``.
        if "cidr" in kwargs:
            kwargs["ip_range"] = {"cidr": kwargs["cidr"]}
            del kwargs["cidr"]
        for key, value in kwargs.items():
            self.items[key] = value
            setattr(self, key, value)


class FakeMetric(FakeResource):

    # NOTE(review): ``__init_`` is a misspelled ``__init__`` — this method is
    # never invoked as a constructor, so instances lack ``metric`` and
    # ``optional_args``. Likely a latent bug; confirm before relying on it.
    def __init_(self, manager=None, **kwargs):
        super(FakeMetric, self).__init__(manager)
        self.metric = kwargs.get("metric_name")
        self.optional_args = kwargs.get("optional_args", {})


class FakeAlarm(FakeResource):

    def __init__(self, manager=None, **kwargs):
        super(FakeAlarm, self).__init__(manager)
        self.meter_name = kwargs.get("meter_name")
        self.threshold = kwargs.get("threshold")
        # NOTE(review): ``self.state`` is assigned twice; the second
        # assignment (default "ok") wins, so "fake-alarm-state" is dead.
        self.state = kwargs.get("state", "fake-alarm-state")
        self.alarm_id = kwargs.get("alarm_id", "fake-alarm-id")
        self.state = kwargs.get("state", "ok")
        self.optional_args = kwargs.get("optional_args", {})


class FakeSample(FakeResource):

    def __init__(self, manager=None, **kwargs):
        super(FakeSample, self).__init__(manager)
        self.counter_name = kwargs.get("counter_name", "fake-counter-name")
        self.counter_type = kwargs.get("counter_type", "fake-counter-type")
        self.counter_unit = kwargs.get("counter_unit", "fake-counter-unit")
        self.counter_volume = kwargs.get("counter_volume", 100)

    @property
    def resource_id(self):
        return "fake-resource-id"

    def to_dict(self):
        # Serialized form matching what the ceilometer client would return.
        return {"counter_name": self.counter_name,
                "counter_type": self.counter_type,
                "counter_unit": self.counter_unit,
                "counter_volume": self.counter_volume,
                "resource_id": self.resource_id}


class FakeVolume(FakeResource):

    @property
    def _info(self):
        return {"id": "uuid"}


class FakeVolumeType(FakeResource):
    pass


class FakeVolumeTransfer(FakeResource):
    pass


class FakeVolumeSnapshot(FakeResource):
    pass


class FakeVolumeBackup(FakeResource):
    pass


class FakeRole(FakeResource):
    pass


class FakeQueue(FakeResource):

    def __init__(self, manager=None, name="myqueue"):
        super(FakeQueue, self).__init__(manager, name)
        self.queue_name = name
        self.messages = FakeMessagesManager(name)

    def post(self, messages):
        for msg in messages:
            self.messages.create(**msg)

    # NOTE(review): this method is shadowed by the ``self.messages``
    # attribute set in __init__, so it is unreachable on instances.
    def messages(self):
        return self.messages.list()


class FakeDbInstance(FakeResource):
    pass


class FakeMessage(FakeResource):

    def __init__(self, manager=None, **kwargs):
        super(FakeMessage, self).__init__(manager)
        self.body = kwargs.get("body", "fake-body")
        self.ttl = kwargs.get("ttl", 100)


class FakeAvailabilityZone(FakeResource):

    def __init__(self, manager=None):
        super(FakeAvailabilityZone, self).__init__(manager)
        self.zoneName = mock.MagicMock()
        self.zoneState = mock.MagicMock()
        self.hosts = mock.MagicMock()


class FakeWorkbook(FakeResource):

    def __init__(self, manager=None):
        super(FakeWorkbook, self).__init__(manager)
        self.workbook = mock.MagicMock()


class FakeWorkflow(FakeResource):

    def __init__(self, manager=None):
        super(FakeWorkflow, self).__init__(manager)
        self.workflow = mock.MagicMock()


class FakeExecution(FakeResource):

    def __init__(self, manager=None):
        super(FakeExecution, self).__init__(manager)
        self.execution = mock.MagicMock()


class FakeObject(FakeResource):
    pass


class FakeClusterTemplate(FakeResource):
    pass


class FakeManager(object):
    """In-memory resource manager: caches fakes preserving insert order."""

    def __init__(self):
        super(FakeManager, self).__init__()
        self.cache = {}
        self.resources_order = []

    def get(self, resource_uuid):
        return self.cache.get(resource_uuid)

    def delete(self, resource_uuid):
        cached = self.get(resource_uuid)
        if cached is not None:
            # Mark deleted before evicting so callers holding a reference
            # can observe the status change.
            cached.status = "DELETED"
            del self.cache[resource_uuid]
            self.resources_order.remove(resource_uuid)

    def _cache(self, resource):
        self.resources_order.append(resource.uuid)
        self.cache[resource.uuid] = resource
        return resource

    def list(self, **kwargs):
        return [self.cache[key] for key in self.resources_order]

    def find(self, **kwargs):
        for resource in self.cache.values():
            match = True
            for key, value in kwargs.items():
                if getattr(resource, key, None)
!= value: match = False break if match: return resource class FakeServerManager(FakeManager): def __init__(self, image_mgr=None): super(FakeServerManager, self).__init__() self.images = image_mgr or FakeImageManager() def get(self, resource_uuid): server = self.cache.get(resource_uuid) if server is not None: return server raise nova_exceptions.NotFound("Server %s not found" % (resource_uuid)) def _create(self, server_class=FakeServer, name=None): server = self._cache(server_class(self)) if name is not None: server.name = name return server def create(self, name, image_id, flavor_id, **kwargs): return self._create(name=name) def create_image(self, server, name): image = self.images._create() return image.uuid def add_floating_ip(self, server, fip): pass def remove_floating_ip(self, server, fip): pass def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETED" del self.cache[resource] self.resources_order.remove(resource) class FakeImageManager(FakeManager): def __init__(self): super(FakeImageManager, self).__init__() def get(self, resource_uuid): image = self.cache.get(resource_uuid) if image is not None: return image raise exc.HTTPNotFound("Image %s not found" % (resource_uuid)) def _create(self, image_class=FakeImage, name=None, id=None): image = self._cache(image_class(self)) image.owner = "dummy" image.id = image.uuid if name is not None: image.name = name return image def create(self, name, copy_from, container_format, disk_format): return self._create(name=name) def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETED" del self.cache[resource] self.resources_order.remove(resource) class FakeStrategyManager(FakeManager): def get(self, resource_name): for key in self.resources_order: if self.cache[key].name == resource_name: return self.cache[key] class 
FakeGoalManager(FakeManager): def get(self, resource_name): for key in self.resources_order: if self.cache[key].name == resource_name: return self.cache[key] class FakePackageManager(FakeManager): def create(self, package_descr, package_arch, package_class=FakeMurano): package = self._cache(package_class(self)) package.name = list(package_arch.keys())[0] return package class FakeFloatingIPsManager(FakeManager): def create(self): return FakeFloatingIP(self) class FakeFloatingIPPoolsManager(FakeManager): def create(self): return FakeFloatingIPPool(self) class FakeTenantsManager(FakeManager): def create(self, name): return self._cache(FakeTenant(self, name)) def update(self, tenant_id, name=None, description=None): tenant = self.get(tenant_id) name = name or (tenant.name + "_updated") desc = description or (tenant.name + "_description_updated") tenant.name = name tenant.description = desc return self._cache(tenant) class FakeNetworkManager(FakeManager): def create(self, net_id): net = FakeNetwork(self) net.id = net_id return self._cache(net) class FakeFlavorManager(FakeManager): def create(self): flv = FakeFlavor(self) return self._cache(flv) class FakeKeypairManager(FakeManager): def create(self, name, public_key=None): kp = FakeKeypair(self) kp.name = name or kp.name return self._cache(kp) def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETED" del self.cache[resource] self.resources_order.remove(resource) class FakeClusterTemplateManager(FakeManager): def create(self, name): cluster_template = FakeClusterTemplate(self) cluster_template.name = name or cluster_template.name return self._cache(cluster_template) def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: del self.cache[resource] self.resources_order.remove(resource) class FakeStackManager(FakeManager): def create(self, 
name): stack = FakeStack(self) stack.name = name or stack.name return self._cache(stack) def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETE_COMPLETE" del self.cache[resource] self.resources_order.remove(resource) class FakeDomainManager(FakeManager): def create(self, name): domain = FakeDomain(self) domain.name = name or domain.name return self._cache(domain) def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETE_COMPLETE" del self.cache[resource] self.resources_order.remove(resource) class FakeNovaQuotasManager(FakeManager): def update(self, tenant_id, **kwargs): fq = FakeQuotas(self) return self._cache(fq) def delete(self, tenant_id): pass class FakeCinderQuotasManager(FakeManager): def update(self, tenant_id, **kwargs): fq = FakeQuotas(self) return self._cache(fq) def delete(self, tenant_id): pass class FakeSecurityGroupManager(FakeManager): def __init__(self, rule_manager=None): super(FakeSecurityGroupManager, self).__init__() self.rule_manager = rule_manager self.create("default") def create(self, name, description=""): sg = FakeSecurityGroup( manager=self, rule_manager=self.rule_manager) sg.name = name or sg.name sg.description = description return self._cache(sg) def to_dict(self, obj): return {"id": obj.id, "name": obj.name} def find(self, name, **kwargs): kwargs["name"] = name for resource in self.cache.values(): match = True for key, value in kwargs.items(): if getattr(resource, key, None) != value: match = False break if match: return resource raise nova_exceptions.NotFound("Security Group not found") def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETED" del self.cache[resource] self.resources_order.remove(resource) class 
FakeSecurityGroupRuleManager(FakeManager): def __init__(self): super(FakeSecurityGroupRuleManager, self).__init__() def create(self, parent_group_id, **kwargs): kwargs["parent_group_id"] = parent_group_id sgr = FakeSecurityGroupRule(self, **kwargs) return self._cache(sgr) class FakeUsersManager(FakeManager): def create(self, username, password, email, tenant_id): user = FakeUser(manager=self, name=username) user.name = username or user.name return self._cache(user) class FakeServicesManager(FakeManager): def list(self): return [] class FakeVolumeManager(FakeManager): def __init__(self): super(FakeVolumeManager, self).__init__() self.__volumes = {} self.__tenant_id = generate_uuid() def create(self, size=None, **kwargs): volume = FakeVolume(self) volume.size = size or 1 volume.name = kwargs.get("display_name", volume.name) volume.status = "available" volume.tenant_id = self.__tenant_id self.__volumes[volume.id] = volume return self._cache(volume) def list(self): return self.__volumes.values() def delete(self, resource): super(FakeVolumeManager, self).delete(resource.id) del self.__volumes[resource.id] class FakeVolumeTypeManager(FakeManager): def create(self, name): vol_type = FakeVolumeType(self) vol_type.name = name or vol_type.name return self._cache(vol_type) class FakeVolumeTransferManager(FakeManager): def __init__(self): super(FakeVolumeTransferManager, self).__init__() self.__volume_transfers = {} def list(self): return self.__volume_transfers.values() def create(self, name): transfer = FakeVolumeTransfer(self) transfer.name = name or transfer.name self.__volume_transfers[transfer.id] = transfer return self._cache(transfer) def delete(self, resource): super(FakeVolumeTransferManager, self).delete(resource.id) del self.__volume_transfers[resource.id] class FakeVolumeSnapshotManager(FakeManager): def __init__(self): super(FakeVolumeSnapshotManager, self).__init__() self.__snapshots = {} self.__tenant_id = generate_uuid() def create(self, name, force=False, 
display_name=None): snapshot = FakeVolumeSnapshot(self) snapshot.name = name or snapshot.name snapshot.status = "available" snapshot.tenant_id = self.__tenant_id self.__snapshots[snapshot.id] = snapshot return self._cache(snapshot) def list(self): return self.__snapshots.values() def delete(self, resource): super(FakeVolumeSnapshotManager, self).delete(resource.id) del self.__snapshots[resource.id] class FakeVolumeBackupManager(FakeManager): def __init__(self): super(FakeVolumeBackupManager, self).__init__() self.__backups = {} self.__tenant_id = generate_uuid() def create(self, name): backup = FakeVolumeBackup(self) backup.name = name or backup.name self.__backups[backup.id] = backup return self._cache(backup) def list(self): return self.__backups.values() def delete(self, resource): super(FakeVolumeBackupManager, self).delete(resource.id) del self.__backups[resource.id] class FakeRolesManager(FakeManager): def create(self, role_id, name): role = FakeRole(self) role.name = name role.id = role_id return self._cache(role) def roles_for_user(self, user, tenant): role = FakeRole(self) role.name = "admin" return [role, ] def add_user_role(self, user, role, tenant): pass class FakeMetricManager(FakeManager): def create(self, **kwargs): metric = FakeMetric(self, **kwargs) return self._cache(metric) def get(self, metric_id): metric = self.find(metric_id=metric_id) return [metric] class FakeMetricsManager(FakeManager): def list(self): return ["fake-metric"] class FakeQueuesManager(FakeManager): def __init__(self): super(FakeQueuesManager, self).__init__() self.__queues = {} def create(self, name): queue = FakeQueue(self, name) self.__queues[queue.name] = queue return self._cache(queue) def list(self): return self.__queues.values() def delete(self, queue): super(FakeQueuesManager, self).delete(queue.name) del self.__queues[queue.name] class FakeDbInstanceManager(FakeManager): def __init__(self): super(FakeDbInstanceManager, self).__init__() self.__db_instances = {} def 
create(self, name, flavor_id, size): instance = FakeDbInstance(self) instance.name = name or instance.name instance.flavor_id = flavor_id instance.size = size return self._cache(instance) def list(self): return self.__db_instances.values() def delete(self, resource): if not isinstance(resource, str): resource = resource.id cached = self.get(resource) if cached is not None: cached.status = "DELETE_COMPLETE" del self.cache[resource] self.resources_order.remove(resource) class FakeMessagesManager(FakeManager): def __init__(self, queue="myqueue"): super(FakeMessagesManager, self).__init__() self.__queue = queue self.__messages = {} def create(self, **kwargs): message = FakeMessage(self, **kwargs) self.__messages[message.id] = message return self._cache(message) def list(self): return self.__messages.values() def delete(self, message): super(FakeMessagesManager, self).delete(message.id) del self.__messages[message.id] class FakeAvailabilityZonesManager(FakeManager): def __init__(self): super(FakeAvailabilityZonesManager, self).__init__() self.zones = FakeAvailabilityZone() def list(self): return [self.zones] class FakeWorkbookManager(FakeManager): def __init__(self): super(FakeWorkbookManager, self).__init__() self.workbook = FakeWorkbook() def list(self): return [self.workbook] class FakeWorkflowManager(FakeManager): def __init__(self): super(FakeWorkflowManager, self).__init__() self.workflow = FakeWorkflow() def list(self): return [self.workflow] class FakeExecutionManager(FakeManager): def __init__(self): super(FakeExecutionManager, self).__init__() self.execution = FakeExecution() def list(self): return [self.execution] def create(self): return self.execution class FakeObjectManager(FakeManager): def get_account(self, **kwargs): containers = self.list() return (mock.MagicMock(), [{"name": con.name} for con in containers]) def get_container(self, name, **kwargs): container = self.find(name=name) if container is None: raise swift_exceptions.ClientException("Container 
GET failed")
        return (mock.MagicMock(),
                [{"name": obj} for obj in container.items])

    def put_container(self, name, **kwargs):
        # Container names are unique: re-creating an existing one fails.
        if self.find(name=name):
            raise swift_exceptions.ClientException("Container PUT failed")
        self._cache(FakeObject(name=name))

    def delete_container(self, name, **kwargs):
        # Only empty containers may be deleted, like real swift.
        container = self.find(name=name)
        if container is None or len(container.items.keys()) > 0:
            raise swift_exceptions.ClientException("Container DELETE failed")
        self.delete(container.uuid)

    def get_object(self, container_name, object_name, **kwargs):
        container = self.find(name=container_name)
        if container is None or object_name not in container.items:
            raise swift_exceptions.ClientException("Object GET failed")
        return (mock.MagicMock(), container.items[object_name])

    def put_object(self, container_name, object_name, content, **kwargs):
        container = self.find(name=container_name)
        if container is None:
            raise swift_exceptions.ClientException("Object PUT failed")
        container.items[object_name] = content
        return mock.MagicMock()

    def delete_object(self, container_name, object_name, **kwargs):
        container = self.find(name=container_name)
        if container is None or object_name not in container.items:
            raise swift_exceptions.ClientException("Object DELETE failed")
        del container.items[object_name]


class FakeServiceCatalog(object):
    """Minimal keystone service catalog: every endpoint is the same URL."""

    def get_credentials(self):
        return {"image": [{"publicURL": "http://fake.to"}],
                "metering": [{"publicURL": "http://fake.to"}],
                "monitoring": [{"publicURL": "http://fake.to"}]}

    def url_for(self, **kwargs):
        return "http://fake.to"


class FakeGlanceClient(object):
    def __init__(self, version="1"):
        self.images = FakeImageManager()
        self.version = version


class FakeMuranoClient(object):
    def __init__(self):
        self.packages = FakePackageManager()


class FakeCinderClient(object):
    def __init__(self):
        self.volumes = FakeVolumeManager()
        self.volume_types = FakeVolumeTypeManager()
        self.transfers = FakeVolumeTransferManager()
        self.volume_snapshots = FakeVolumeSnapshotManager()
        self.backups = FakeVolumeBackupManager()
        self.quotas = FakeCinderQuotasManager()


class FakeNovaClient(object):
    def __init__(self, failed_server_manager=False):
        self.images = FakeImageManager()
        # Servers share the image manager so create_image() registers
        # snapshots in the same store.
        self.servers = FakeServerManager(self.images)
        self.floating_ips = FakeFloatingIPsManager()
        self.floating_ip_pools = FakeFloatingIPPoolsManager()
        self.networks = FakeNetworkManager()
        self.flavors = FakeFlavorManager()
        self.keypairs = FakeKeypairManager()
        self.security_group_rules = FakeSecurityGroupRuleManager()
        self.security_groups = FakeSecurityGroupManager(
            rule_manager=self.security_group_rules)
        self.quotas = FakeNovaQuotasManager()
        self.set_management_url = mock.MagicMock()
        self.availability_zones = FakeAvailabilityZonesManager()


class FakeHeatClient(object):
    def __init__(self):
        self.stacks = FakeStackManager()


class FakeDesignateClient(object):
    def __init__(self):
        self.domains = FakeDomainManager()


class FakeKeystoneClient(object):
    def __init__(self):
        self.tenants = FakeTenantsManager()
        self.users = FakeUsersManager()
        self.roles = FakeRolesManager()
        self.project_id = "abc123"
        self.auth_url = "http://example.com:5000/v2.0/"
        self.auth_token = "fake"
        self.auth_user_id = generate_uuid()
        self.auth_tenant_id = generate_uuid()
        self.service_catalog = FakeServiceCatalog()
        self.services = FakeServicesManager()
        self.region_name = "RegionOne"
        self.auth_ref = mock.Mock()
        self.auth_ref.role_names = ["admin"]
        self.version = "v2.0"
        self.session = mock.MagicMock()
        # NOTE(review): this instance attribute shadows the
        # ``authenticate`` method below on every instance, making the
        # method effectively dead — confirm which one callers rely on.
        self.authenticate = mock.MagicMock()

    def authenticate(self):
        return True

    def list_users(self):
        return self.users.list()

    def list_projects(self):
        return self.tenants.list()

    def list_services(self):
        return self.services.list()

    def list_roles(self):
        return self.roles.list()

    def delete_user(self, uuid):
        return self.users.delete(uuid)


class FakeGnocchiClient(object):
    def __init__(self):
        self.metric = FakeMetricManager()


class FakeMonascaClient(object):
    def __init__(self):
        self.metrics = FakeMetricsManager()


class FakeNeutronClient(object):
    def 
__init__(self, **kwargs): self.__networks = {} self.__subnets = {} self.__routers = {} self.__ports = {} self.__pools = {} self.__vips = {} self.__fips = {} self.__healthmonitors = {} self.__tenant_id = kwargs.get("tenant_id", generate_uuid()) self.format = "json" self.version = "2.0" @staticmethod def _filter(resource_list, search_opts): return [res for res in resource_list if all(res[field] == value for field, value in search_opts.items())] def add_interface_router(self, router_id, data): subnet_id = data["subnet_id"] if (router_id not in self.__routers or subnet_id not in self.__subnets): raise neutron_exceptions.NeutronClientException subnet = self.__subnets[subnet_id] port = self.create_port( {"port": {"network_id": subnet["network_id"]}})["port"] port["device_id"] = router_id port["fixed_ips"].append({"subnet_id": subnet_id, "ip_address": subnet["gateway_ip"]}) return {"subnet_id": subnet_id, "tenant_id": port["tenant_id"], "port_id": port["id"], "id": router_id} def create_network(self, data): network = setup_dict(data["network"], defaults={"name": generate_name("net_"), "admin_state_up": True}) network_id = generate_uuid() network.update({"id": network_id, "status": "ACTIVE", "subnets": [], "provider:physical_network": None, "tenant_id": self.__tenant_id, "provider:network_type": "local", "router:external": True, "shared": False, "provider:segmentation_id": None}) self.__networks[network_id] = network return {"network": network} def create_pool(self, data): pool = setup_dict(data["pool"], required=["lb_method", "protocol", "subnet_id"], defaults={"name": generate_name("pool_"), "admin_state_up": True}) if pool["subnet_id"] not in self.__subnets: raise neutron_exceptions.NeutronClientException pool_id = generate_uuid() pool.update({"id": pool_id, "status": "PENDING_CREATE", "tenant_id": self.__tenant_id}) self.__pools[pool_id] = pool return {"pool": pool} def create_vip(self, data): vip = setup_dict(data["vip"], required=["protocol_port", "protocol", 
"subnet_id", "pool_id"], defaults={"name": generate_name("vip_"), "admin_state_up": True}) if (vip["subnet_id"] not in self.__subnets) or (vip["pool_id"] not in self.__pools): raise neutron_exceptions.NeutronClientException vip_id = generate_uuid() vip.update({"id": vip_id, "status": "PENDING_CREATE", "tenant_id": self.__tenant_id}) self.__vips[vip_id] = vip return {"vip": vip} def create_floatingip(self, data): fip = setup_dict(data["floatingip"], required=["floating_network"], defaults={"admin_state_up": True}) if (fip["floating_network"] not in self.__nets): raise neutron_exceptions.NeutronClientException fip_id = generate_uuid() fip.update({"id": fip_id, "tenant_id": self.__tenant_id}) self.__fips[fip_id] = fip return {"fip": fip} def create_health_monitor(self, data): healthmonitor = setup_dict(data["healthmonitor"], required=["type", "timeout", "delay", "max_retries"], defaults={"admin_state_up": True}) healthmonitor_id = generate_uuid() healthmonitor.update({"id": healthmonitor_id, "status": "PENDING_CREATE", "tenant_id": self.__tenant_id}) self.__healthmonitors[healthmonitor_id] = healthmonitor return {"healthmonitor": healthmonitor} def create_port(self, data): port = setup_dict(data["port"], required=["network_id"], defaults={"name": generate_name("port_"), "admin_state_up": True}) if port["network_id"] not in self.__networks: raise neutron_exceptions.NeutronClientException port_id = generate_uuid() port.update({"id": port_id, "status": "ACTIVE", "binding:host_id": "fakehost", "extra_dhcp_opts": [], "binding:vnic_type": "normal", "binding:vif_type": "ovs", "device_owner": "", "mac_address": generate_mac(), "binding:profile": {}, "binding:vif_details": {u"port_filter": True}, "security_groups": [], "fixed_ips": [], "device_id": "", "tenant_id": self.__tenant_id, "allowed_address_pairs": []}) self.__ports[port_id] = port return {"port": port} def create_router(self, data): router = setup_dict(data["router"], defaults={"name": generate_name("router_"), 
"external_gateway_info": None, "admin_state_up": True}) router_id = generate_uuid() router.update({"id": router_id, "status": "ACTIVE", "external_gateway_info": None, "tenant_id": self.__tenant_id}) self.__routers[router_id] = router return {"router": router} def create_subnet(self, data): subnet = setup_dict( data["subnet"], required=["network_id", "cidr", "ip_version"], defaults={"name": generate_name("subnet_"), "dns_nameservers": ["8.8.8.8", "8.8.4.4"]}) if subnet["network_id"] not in self.__networks: raise neutron_exceptions.NeutronClientException subnet_id = generate_uuid() subnet.update({"id": subnet_id, "enable_dhcp": True, "tenant_id": self.__tenant_id, "ipv6_ra_mode": None, "allocation_pools": [], "gateway_ip": re.sub("./.*$", "1", subnet["cidr"]), "ipv6_address_mode": None, "ip_version": 4, "host_routes": []}) self.__subnets[subnet_id] = subnet return {"subnet": subnet} def update_resource(self, resource_id, resource_dict, data): if resource_id not in resource_dict: raise neutron_exceptions.NeutronClientException self.resource_list[resource_id].update(data) def update_network(self, network_id, data): self.update_resource(network_id, self.__networks, data) def update_pool(self, pool_id, data): self.update_resource(pool_id, self.__pools, data) def update_vip(self, vip_id, data): self.update_resource(vip_id, self.__vips, data) def update_health_monitor(self, healthmonitor_id, data): self.update_resource(healthmonitor_id, self.__healthmonitors, data) def update_subnet(self, subnet_id, data): self.update_resource(subnet_id, self.__subnets, data) def update_port(self, port_id, data): self.update_resource(port_id, self.__ports, data) def update_router(self, router_id, data): self.update_resource(router_id, self.__routers, data) def delete_network(self, network_id): if network_id not in self.__networks: raise neutron_exceptions.NeutronClientException for port in self.__ports.values(): if port["network_id"] == network_id: # Network is in use by port raise 
neutron_exceptions.NeutronClientException del self.__networks[network_id] return "" def delete_pool(self, pool_id): if pool_id not in self.__pools: raise neutron_exceptions.NeutronClientException del self.__pools[pool_id] return "" def delete_vip(self, vip_id): if vip_id not in self.__vips: raise neutron_exceptions.NeutronClientException del self.__vips[vip_id] def delete_health_monitor(self, healthmonitor_id): if healthmonitor_id not in self.__healthmonitors: raise neutron_exceptions.NeutronClientException del self.__healthmonitors[healthmonitor_id] return "" def delete_floatingip(self, fip_id): if fip_id not in self.__fips: raise neutron_exceptions.NeutronClientException del self.__fips[fip_id] return "" def delete_port(self, port_id): if port_id not in self.__ports: raise neutron_exceptions.PortNotFoundClient if self.__ports[port_id]["device_owner"]: # Port is owned by some device raise neutron_exceptions.NeutronClientException del self.__ports[port_id] return "" def delete_router(self, router_id): if router_id not in self.__routers: raise neutron_exceptions.NeutronClientException for port in self.__ports.values(): if port["device_id"] == router_id: # Router has active port raise neutron_exceptions.NeutronClientException del self.__routers[router_id] return "" def delete_subnet(self, subnet_id): if subnet_id not in self.__subnets: raise neutron_exceptions.NeutronClientException for port in self.__ports.values(): for fip in port["fixed_ips"]: if fip["subnet_id"] == subnet_id: # Subnet has IP allocation from some port raise neutron_exceptions.NeutronClientException del self.__subnets[subnet_id] return "" def list_networks(self, **search_opts): nets = self._filter(self.__networks.values(), search_opts) return {"networks": nets} def list_pools(self, **search_opts): pools = self._filter(self.__pools.values(), search_opts) return {"pools": pools} def list_vips(self, **search_opts): vips = self._filter(self.__vips.values(), search_opts) return {"vips": vips} def 
list_health_monitors(self, **search_opts): healthmonitors = self._filter( self.__healthmonitors.values(), search_opts) return {"healthmonitors": healthmonitors} def list_ports(self, **search_opts): ports = self._filter(self.__ports.values(), search_opts) return {"ports": ports} def list_routers(self, **search_opts): routers = self._filter(self.__routers.values(), search_opts) return {"routers": routers} def list_subnets(self, **search_opts): subnets = self._filter(self.__subnets.values(), search_opts) return {"subnets": subnets} def list_floatingips(self, **search_opts): fips = self._filter(self.__fips.values(), search_opts) return {"floatingips": fips} def remove_interface_router(self, router_id, data): subnet_id = data["subnet_id"] if (router_id not in self.__routers or subnet_id not in self.__subnets): raise neutron_exceptions.NeutronClientException subnet = self.__subnets[subnet_id] for port_id, port in self.__ports.items(): if port["device_id"] == router_id: for fip in port["fixed_ips"]: if fip["subnet_id"] == subnet_id: del self.__ports[port_id] return {"subnet_id": subnet_id, "tenant_id": subnet["tenant_id"], "port_id": port_id, "id": router_id} raise neutron_exceptions.NeutronClientException def associate_health_monitor(self, pool_id, healthmonitor_id): if pool_id not in self.__pools: raise neutron_exceptions.NeutronClientException if healthmonitor_id not in self.__healthmonitors: raise neutron_exceptions.NeutronClientException self.__pools[pool_id]["pool"]["healthmonitors"] = healthmonitor_id return {"pool": self.__pools[pool_id]} def disassociate_health_monitor(self, pool_id, healthmonitor_id): if pool_id not in self.__pools: raise neutron_exceptions.NeutronClientException if healthmonitor_id not in self.__healthmonitors: raise neutron_exceptions.NeutronClientException del self.__pools[pool_id]["pool"]["healthmonitors"][healthmonitor_id] return "" class FakeOctaviaClient(object): def __init__(self): pass class FakeIronicClient(object): def __init__(self): 
        # TODO(romcheg):Fake Manager subclasses to manage BM nodes.
        pass


class FakeSaharaClient(object):
    def __init__(self):
        self.job_executions = mock.MagicMock()
        self.jobs = mock.MagicMock()
        self.job_binary_internals = mock.MagicMock()
        self.job_binaries = mock.MagicMock()
        self.data_sources = mock.MagicMock()
        self.clusters = mock.MagicMock()
        self.cluster_templates = mock.MagicMock()
        self.node_group_templates = mock.MagicMock()
        self.setup_list_methods()

    def setup_list_methods(self):
        mock_with_id = mock.MagicMock()
        mock_with_id.id = 42
        # First call of list returns a list with one object, the next should
        # empty after delete.
        self.job_executions.list.side_effect = [[mock_with_id], []]
        self.jobs.list.side_effect = [[mock_with_id], []]
        self.job_binary_internals.list.side_effect = [[mock_with_id], []]
        self.job_binaries.list.side_effect = [[mock_with_id], []]
        self.data_sources.list.side_effect = [[mock_with_id], []]
        self.clusters.list.side_effect = [[mock_with_id], []]
        self.cluster_templates.list.side_effect = [[mock_with_id], []]
        self.node_group_templates.list.side_effect = [[mock_with_id], []]


class FakeZaqarClient(object):
    def __init__(self):
        self.queues = FakeQueuesManager()

    def queue(self, name, **kwargs):
        # NOTE(review): kwargs are forwarded, but FakeQueuesManager.create
        # only accepts ``name`` — extra kwargs would raise TypeError;
        # confirm callers never pass any.
        return self.queues.create(name, **kwargs)


class FakeTroveClient(object):
    def __init__(self):
        self.instances = FakeDbInstanceManager()


class FakeMistralClient(object):
    def __init__(self):
        self.workbook = FakeWorkbookManager()
        self.workflow = FakeWorkflowManager()
        self.execution = FakeExecutionManager()


class FakeSwiftClient(FakeObjectManager):
    pass


class FakeEC2Client(object):
    def __init__(self):
        pass


class FakeSenlinClient(object):
    def __init__(self):
        # TODO(Yanyan Hu):Fake interfaces of senlinclient.
        pass


class FakeMagnumClient(object):
    def __init__(self):
        self.cluster_templates = FakeClusterTemplateManager()


class FakeWatcherClient(object):
    def __init__(self):
        self.strategy = FakeStrategyManager()
        self.goal = FakeGoalManager()


class FakeBarbicanClient(object):
    def __init__(self):
        pass


class FakeClients(object):
    """Aggregates all the fake service clients, created lazily."""

    def __init__(self, credential_=None):
        # Each client is built on first access and then memoized.
        self._nova = None
        self._glance = None
        self._keystone = None
        self._cinder = None
        self._neutron = None
        self._octavia = None
        self._sahara = None
        self._heat = None
        self._designate = None
        self._zaqar = None
        self._trove = None
        self._mistral = None
        self._swift = None
        self._murano = None
        self._monasca = None
        self._ec2 = None
        self._senlin = None
        self._watcher = None
        self._barbican = None
        self._credential = credential_ or FakeCredential(
            auth_url="http://fake.example.org:5000/v2.0/",
            username="fake_username",
            password="fake_password",
            tenant_name="fake_tenant_name")

    def keystone(self, version=None):
        if not self._keystone:
            self._keystone = FakeKeystoneClient()
        return self._keystone

    def verified_keystone(self):
        return self.keystone()

    def nova(self):
        if not self._nova:
            self._nova = FakeNovaClient()
        return self._nova

    def glance(self, version="1"):
        if not self._glance:
            self._glance = FakeGlanceClient(version)
        return self._glance

    def cinder(self):
        if not self._cinder:
            self._cinder = FakeCinderClient()
        return self._cinder

    def neutron(self):
        if not self._neutron:
            self._neutron = FakeNeutronClient()
        return self._neutron

    def octavia(self):
        if not self._octavia:
            self._octavia = FakeOctaviaClient()
        return self._octavia

    def sahara(self):
        if not self._sahara:
            self._sahara = FakeSaharaClient()
        return self._sahara

    def heat(self):
        if not self._heat:
            self._heat = FakeHeatClient()
        return self._heat

    def designate(self):
        if not self._designate:
            self._designate = FakeDesignateClient()
        return self._designate

    def monasca(self):
        if not self._monasca:
            self._monasca = FakeMonascaClient()
        return self._monasca

    def zaqar(self):
        if not self._zaqar:
            self._zaqar = FakeZaqarClient()
        return self._zaqar

    def trove(self):
        if not self._trove:
            self._trove = FakeTroveClient()
        return self._trove

    def mistral(self):
        if not self._mistral:
            self._mistral = FakeMistralClient()
        return self._mistral

    def swift(self):
        if not self._swift:
            self._swift = FakeSwiftClient()
        return self._swift

    def murano(self):
        if not self._murano:
            self._murano = FakeMuranoClient()
        return self._murano

    def ec2(self):
        if not self._ec2:
            self._ec2 = FakeEC2Client()
        return self._ec2

    def senlin(self):
        if not self._senlin:
            self._senlin = FakeSenlinClient()
        return self._senlin

    def watcher(self):
        if not self._watcher:
            self._watcher = FakeWatcherClient()
        return self._watcher

    def barbican(self):
        if not self._barbican:
            self._barbican = FakeBarbicanClient()
        return self._barbican


class FakeRunner(object):
    # JSON schema used by tests that validate runner configuration.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "type": {
                "type": "string",
                "enum": ["fake"]
            },
            "a": {
                "type": "string"
            },
            "b": {
                "type": "number"
            }
        },
        "required": ["type", "a"]
    }


class FakeScenario(scenario.Scenario):
    """Scenario stub exposing success, output, error and timeout paths."""

    def idle_time(self):
        return 0

    def do_it(self, **kwargs):
        pass

    def with_output(self, **kwargs):
        return {"data": {"a": 1}, "error": None}

    def with_add_output(self):
        self.add_output(additive={"title": "Additive",
                                  "description": "Additive description",
                                  "data": [["a", 1]],
                                  "chart_plugin": "FooPlugin"},
                        complete={"title": "Complete",
                                  "description": "Complete description",
                                  "data": [["a", [[1, 2], [2, 3]]]],
                                  "chart_plugin": "BarPlugin"})

    def too_long(self, **kwargs):
        pass

    def something_went_wrong(self, **kwargs):
        raise Exception("Something went wrong")

    def raise_timeout(self, **kwargs):
        raise multiprocessing.TimeoutError()


@scenario.configure(name="classbased.fooscenario")
class FakeClassBasedScenario(FakeScenario):
    """Fake class-based scenario."""

    def run(self, *args, **kwargs):
        pass


class FakeTimer(rally_utils.Timer):
    # Fixed values make duration-related assertions deterministic.
    def duration(self):
        return 10

    def timestamp(self):
        return 0

    def finish_timestamp(self):
        return 3
@context.configure(name="fake", order=1)
class FakeContext(context.Context):
    """Minimal concrete Context plugin used as a stand-in in unit tests.

    Accepts a single optional integer option ("test") and performs no real
    setup/cleanup work.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "test": {
                "type": "integer"
            },
        },
        "additionalProperties": False
    }

    def __init__(self, context_obj=None):
        # Fill in the minimum structure the base Context expects so tests
        # can instantiate this without building a full context dict.
        context_obj = context_obj or {}
        context_obj.setdefault("config", {})
        context_obj["config"].setdefault("fake", None)
        # A MagicMock task lets tests assert on task interactions freely.
        context_obj.setdefault("task", mock.MagicMock())
        super(FakeContext, self).__init__(context_obj)

    def setup(self):
        """No-op: this fake creates no resources."""
        pass

    def cleanup(self):
        """No-op: nothing was created in setup()."""
        pass


@context.configure(name="fake_hidden_context", order=1, hidden=True)
class FakeHiddenContext(FakeContext):
    """Same as FakeContext, but registered as hidden from plugin listings."""
    pass


@context.configure(name="fake_user_context", order=1)
class FakeUserContext(FakeContext):
    """FakeContext pre-populated with admin/user credentials and a tenant."""

    # Class-level fixtures shared by every instance; individual tests may
    # override the corresponding keys before instantiation takes effect.
    admin = {
        "id": "adminuuid",
        "credential": FakeCredential(
            auth_url="aurl",
            username="aname",
            password="apwd",
            tenant_name="atenant")
    }
    user = {
        "id": "uuid",
        "credential": FakeCredential(
            auth_url="url",
            username="name",
            password="pwd",
            tenant_name="tenant"),
        "tenant_id": "uuid"
    }
    tenants = {"uuid": {"name": "tenant"}}

    def __init__(self, ctx):
        super(FakeUserContext, self).__init__(ctx)
        # setdefault so a caller-provided context wins over the fixtures.
        self.context.setdefault("admin", FakeUserContext.admin)
        self.context.setdefault("users", [FakeUserContext.user])
        self.context.setdefault("tenants", FakeUserContext.tenants)
        self.context.setdefault(
            "scenario_name", "NovaServers.boot_server_from_volume_and_delete")


class FakeDeployment(dict):
    """Dict-backed deployment double exposing the credential accessors tests use."""

    def __init__(self, **kwargs):
        # Pull out the keyword args that describe credentials and rebuild
        # them in the nested {platform: [{"admin": ..., "users": [...]}]}
        # layout that real deployments use.
        platform = kwargs.pop("platform", "openstack")
        kwargs["credentials"] = {
            platform: [{"admin": kwargs.pop("admin", None),
                        "users": kwargs.pop("users", [])}],
            "default": [{"admin": None, "users": []}]}
        dict.__init__(self, **kwargs)
        self.update_status = mock.Mock()
        self.env_obj = mock.Mock()

    def get_platforms(self):
        # Platform names are the top-level keys of the credentials mapping.
        return [platform for platform in self["credentials"]]

    def get_credentials_for(self, platform):
        # Only the first credentials entry per platform is ever used here.
        return self["credentials"][platform][0]

    def verify_connections(self):
        """No-op: connection checking is irrelevant for tests."""
        pass

    def get_validation_context(self):
        # Real deployments may contribute validation context; the fake adds none.
        return {}


class FakeEnvironment(object):
    """Trivial environment double carrying a uuid and pre-cached data."""

    def __init__(self, env_uuid, data):
        self.uuid = env_uuid
        self.data = data

    @property
    def cached_data(self):
        # Returned as-is; tests supply whatever structure they need.
        return self.data


class FakeTask(dict, object):
    """Dict-backed task double with mocked status-transition methods."""

    def __init__(self, task=None, temporary=False, **kwargs):
        self.is_temporary = temporary
        # Mocks allow tests to assert on status updates without a database.
        self.update_status = mock.Mock()
        self.set_failed = mock.Mock()
        self.set_validation_failed = mock.Mock()
        task = task or {}
        # Merge the positional task dict and keyword overrides into self.
        for k, v in itertools.chain(task.items(), kwargs.items()):
            self[k] = v
        # Mimic the real object's self-referencing "task" attribute.
        self.task = self

    def to_dict(self):
        # The fake is already a dict, so it can stand in for its own export.
        return self


class FakeAPI(object):
    """Facade double for rally's API exposing autospec'd sub-APIs.

    Each property returns a mock created with create_autospec, so calls are
    signature-checked against the real rally API classes.
    """

    def __init__(self):
        self._deployment = mock.create_autospec(api._Deployment)
        self._task = mock.create_autospec(api._Task)
        self._verifier = mock.create_autospec(api._Verifier)
        self._verification = mock.create_autospec(api._Verification)

    @property
    def deployment(self):
        return self._deployment

    @property
    def task(self):
        return self._task

    @property
    def verifier(self):
        return self._verifier

    @property
    def verification(self):
        return self._verification
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,823
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/zaqar/utils.py
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import atomic

from rally_openstack.task import scenario


class ZaqarScenario(scenario.OpenStackScenario):
    """Base class for Zaqar scenarios with basic atomic actions."""

    @atomic.action_timer("zaqar.create_queue")
    def _queue_create(self, **kwargs):
        """Create a Zaqar queue named with a generated random name.

        :param kwargs: other optional parameters to create queues like
                       "metadata"
        :returns: Zaqar queue instance
        """
        return self.clients("zaqar").queue(self.generate_random_name(),
                                           **kwargs)

    @atomic.action_timer("zaqar.delete_queue")
    def _queue_delete(self, queue):
        """Delete the given Zaqar queue.

        :param queue: queue to remove
        """
        queue.delete()

    def _messages_post(self, queue, messages, min_msg_count, max_msg_count):
        """Post a batch of messages to a Zaqar queue.

        The atomic action name embeds the configured min/max message counts
        so results for different batch-size ranges are reported separately.

        :param queue: post the messages to queue
        :param messages: messages to post
        :param min_msg_count: minimum number of messages
        :param max_msg_count: maximum number of messages
        """
        timer_name = ("zaqar.post_between_%s_and_%s_messages"
                      % (min_msg_count, max_msg_count))
        with atomic.ActionTimer(self, timer_name):
            queue.post(messages)

    @atomic.action_timer("zaqar.list_messages")
    def _messages_list(self, queue):
        """Fetch messages from a Zaqar queue.

        :param queue: queue to read messages from
        :returns: messages iterator
        """
        return queue.messages()
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,824
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/gnocchi/metric.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils


"""Scenarios for Gnocchi metric."""


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiMetric.list_metric")
class ListMetric(gnocchiutils.GnocchiBase):

    def run(self, limit=None):
        """List metrics.

        :param limit: Maximum number of metrics to list
        """
        self.gnocchi.list_metric(limit=limit)


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["gnocchi.metric"]},
                    name="GnocchiMetric.create_metric")
class CreateMetric(gnocchiutils.GnocchiBase):

    def run(self, archive_policy_name="low", resource_id=None, unit=None):
        """Create metric.

        :param archive_policy_name: Archive policy name
        :param resource_id: The resource ID to attach the metric to
        :param unit: The unit of the metric
        """
        create_kwargs = {
            "archive_policy_name": archive_policy_name,
            "resource_id": resource_id,
            "unit": unit,
        }
        self.gnocchi.create_metric(self.generate_random_name(),
                                   **create_kwargs)


@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["gnocchi.metric"]},
                    name="GnocchiMetric.create_delete_metric")
class CreateDeleteMetric(gnocchiutils.GnocchiBase):

    def run(self, archive_policy_name="low", resource_id=None, unit=None):
        """Create metric and then delete it.

        :param archive_policy_name: Archive policy name
        :param resource_id: The resource ID to attach the metric to
        :param unit: The unit of the metric
        """
        create_kwargs = {
            "archive_policy_name": archive_policy_name,
            "resource_id": resource_id,
            "unit": unit,
        }
        metric = self.gnocchi.create_metric(self.generate_random_name(),
                                            **create_kwargs)
        self.gnocchi.delete_metric(metric["id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,825
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/neutron/network.py
# Copyright 2014: Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.common import cfg from rally.common import logging from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.neutron import utils LOG = logging.getLogger(__name__) """Scenarios for Neutron.""" @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_list_networks", platform="openstack") class CreateAndListNetworks(utils.NeutronBaseScenario): def run(self, network_create_args=None): """Create a network and then list all networks. Measure the "neutron net-list" command performance. If you have only 1 user in your context, you will add 1 network on every iteration. So you will have more and more networks and will be able to measure the performance of the "neutron net-list" command depending on the number of networks owned by users. 
:param network_create_args: dict, POST /v2.0/networks request options """ self.neutron.create_network(**(network_create_args or {})) self.neutron.list_networks() @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_show_network", platform="openstack") class CreateAndShowNetwork(utils.NeutronBaseScenario): def run(self, network_create_args=None): """Create a network and show network details. Measure the "neutron net-show" command performance. :param network_create_args: dict, POST /v2.0/networks request options """ network = self.neutron.create_network(**(network_create_args or {})) self.neutron.get_network(network["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="network_update_args") @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_update_networks", platform="openstack") class CreateAndUpdateNetworks(utils.NeutronBaseScenario): def run(self, network_update_args, network_create_args=None): """Create and update a network. Measure the "neutron net-create and net-update" command performance. 
:param network_update_args: dict, PUT /v2.0/networks update request :param network_create_args: dict, POST /v2.0/networks request options """ network = self.neutron.create_network(**(network_create_args or {})) self.neutron.update_network(network["id"], **network_update_args) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("required_services", services=[consts.Service.NEUTRON]) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_delete_networks", platform="openstack") class CreateAndDeleteNetworks(utils.NeutronBaseScenario): def run(self, network_create_args=None): """Create and delete a network. Measure the "neutron net-create" and "net-delete" command performance. :param network_create_args: dict, POST /v2.0/networks request options """ network = self.neutron.create_network(**(network_create_args or {})) self.neutron.delete_network(network["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_list_subnets", platform="openstack") class CreateAndListSubnets(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1): """Create and a given number of subnets and list all subnets. The scenario creates a network, a given number of subnets and then lists subnets. :param network_create_args: dict, POST /v2.0/networks request options. 
Deprecated :param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network """ network = self.neutron.create_network(**(network_create_args or {})) for _ in range(subnets_per_network): self.neutron.create_subnet(network["id"], start_cidr=subnet_cidr_start, **(subnet_create_args or {})) self.neutron.list_subnets() @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_update_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_update_subnets", platform="openstack") class CreateAndUpdateSubnets(utils.NeutronBaseScenario): def run(self, subnet_update_args, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1): """Create and update a subnet. The scenario creates a network, a given number of subnets and then updates the subnet. This scenario measures the "neutron subnet-update" command performance. :param subnet_update_args: dict, PUT /v2.0/subnets update options :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network """ network = self.neutron.create_network(**(network_create_args or {})) subnets = [] for _ in range(subnets_per_network): subnets.append( self.neutron.create_subnet( network["id"], start_cidr=subnet_cidr_start, **(subnet_create_args or {})) ) for subnet in subnets: self.neutron.update_subnet(subnet["id"], **subnet_update_args) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_show_subnets", platform="openstack") class CreateAndShowSubnets(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1): """Create and show a subnet details. The scenario creates a network, a given number of subnets and show the subnet details. This scenario measures the "neutron subnet-show" command performance. :param network_create_args: dict, POST /v2.0/networks request options. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network """ network = self._get_or_create_network(**(network_create_args or {})) subnets = [] for _ in range(subnets_per_network): subnets.append( self.neutron.create_subnet( network["id"], start_cidr=subnet_cidr_start, **(subnet_create_args or {})) ) for subnet in subnets: self.neutron.get_subnet(subnet["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_delete_subnets", platform="openstack") class CreateAndDeleteSubnets(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1): """Create and delete a given number of subnets. The scenario creates a network, a given number of subnets and then deletes subnets. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network """ network = self._get_or_create_network(**(network_create_args or {})) subnets = [] for _ in range(subnets_per_network): subnets.append( self.neutron.create_subnet( network["id"], start_cidr=subnet_cidr_start, **(subnet_create_args or {})) ) for subnet in subnets: self.neutron.delete_subnet(subnet["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_list_routers", platform="openstack") class CreateAndListRouters(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1, router_create_args=None): """Create and a given number of routers and list all routers. Create a network, a given number of subnets and routers and then list all routers. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network :param router_create_args: dict, POST /v2.0/routers request options """ subnet_create_args = dict(subnet_create_args or {}) subnet_create_args["start_cidr"] = subnet_cidr_start self.neutron.create_network_topology( network_create_args=(network_create_args or {}), router_create_args=(router_create_args or {}), router_per_subnet=True, subnet_create_args=subnet_create_args, subnets_count=subnets_per_network ) self.neutron.list_routers() @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_show_routers", platform="openstack") class CreateAndShowRouters(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1, router_create_args=None): """Create and show a given number of routers. Create a network, a given number of subnets and routers and then show all routers. 
:param network_create_args: dict, POST /v2.0/networks request options :param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for each network :param router_create_args: dict, POST /v2.0/routers request options """ subnet_create_args = dict(subnet_create_args or {}) subnet_create_args["start_cidr"] = subnet_cidr_start net_topo = self.neutron.create_network_topology( network_create_args=(network_create_args or {}), router_create_args=(router_create_args or {}), router_per_subnet=True, subnet_create_args=subnet_create_args, subnets_count=subnets_per_network ) for router in net_topo["routers"]: self.neutron.get_router(router["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_update_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_update_routers", platform="openstack") class CreateAndUpdateRouters(utils.NeutronBaseScenario): def run(self, router_update_args, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1, router_create_args=None): """Create and update a given number of routers. Create a network, a given number of subnets and routers and then updating all routers. :param router_update_args: dict, PUT /v2.0/routers update options :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network :param router_create_args: dict, POST /v2.0/routers request options """ subnet_create_args = dict(subnet_create_args or {}) subnet_create_args["start_cidr"] = subnet_cidr_start net_topo = self.neutron.create_network_topology( network_create_args=(network_create_args or {}), router_create_args=(router_create_args or {}), router_per_subnet=True, subnet_create_args=subnet_create_args, subnets_count=subnets_per_network ) for router in net_topo["routers"]: self.neutron.update_router(router["id"], **router_update_args) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="subnet_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_create_args") @validation.add("number", param_name="subnets_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_delete_routers", platform="openstack") class CreateAndDeleteRouters(utils.NeutronBaseScenario): def run(self, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, subnets_per_network=1, router_create_args=None): """Create and delete a given number of routers. Create a network, a given number of subnets and routers and then delete all routers. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param subnets_per_network: int, number of subnets for one network :param router_create_args: dict, POST /v2.0/routers request options """ subnet_create_args = dict(subnet_create_args or {}) subnet_create_args["start_cidr"] = subnet_cidr_start net_topo = self.neutron.create_network_topology( network_create_args=(network_create_args or {}), router_create_args=(router_create_args or {}), router_per_subnet=True, subnet_create_args=subnet_create_args, subnets_count=subnets_per_network ) for e in range(subnets_per_network): router = net_topo["routers"][e] subnet = net_topo["subnets"][e] self.neutron.remove_interface_from_router(subnet_id=subnet["id"], router_id=router["id"]) self.neutron.delete_router(router["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="router_create_args") @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.set_and_clear_router_gateway", platform="openstack") class SetAndClearRouterGateway(utils.NeutronBaseScenario): def run(self, enable_snat=True, network_create_args=None, router_create_args=None): """Set and Remove the external network gateway from a router. create an external network and a router, set external network gateway for the router, remove the external network gateway from the router. 
:param enable_snat: True if enable snat :param network_create_args: dict, POST /v2.0/networks request options :param router_create_args: dict, POST /v2.0/routers request options """ network_create_args = network_create_args or {} router_create_args = router_create_args or {} ext_net = self.neutron.create_network(**network_create_args) router = self.neutron.create_router(**router_create_args) self.neutron.add_gateway_to_router(router_id=router["id"], network_id=ext_net["id"], enable_snat=enable_snat) self.neutron.remove_gateway_from_router(router["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="port_create_args") @validation.add("number", param_name="ports_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_list_ports", platform="openstack") class CreateAndListPorts(utils.NeutronBaseScenario): def run(self, network_create_args=None, port_create_args=None, ports_per_network=1): """Create and a given number of ports and list all ports. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param port_create_args: dict, POST /v2.0/ports request options :param ports_per_network: int, number of ports for one network """ network = self._get_or_create_network(**(network_create_args or {})) for i in range(ports_per_network): self.neutron.create_port(network["id"], **(port_create_args or {})) self.neutron.list_ports() @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="port_create_args") @validation.add("restricted_parameters", param_names="name", subdict="port_update_args") @validation.add("number", param_name="ports_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_update_ports", platform="openstack") class CreateAndUpdatePorts(utils.NeutronBaseScenario): def run(self, port_update_args, network_create_args=None, port_create_args=None, ports_per_network=1): """Create and update a given number of ports. Measure the "neutron port-create" and "neutron port-update" commands performance. :param port_update_args: dict, PUT /v2.0/ports update request options :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param port_create_args: dict, POST /v2.0/ports request options :param ports_per_network: int, number of ports for one network """ network = self._get_or_create_network(**(network_create_args or {})) for i in range(ports_per_network): port = self.neutron.create_port( network["id"], **(port_create_args or {})) self.neutron.update_port(port["id"], **port_update_args) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="port_create_args") @validation.add("number", param_name="ports_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_show_ports", platform="openstack") class CreateAndShowPorts(utils.NeutronBaseScenario): def run(self, network_create_args=None, port_create_args=None, ports_per_network=1): """Create a given number of ports and show created ports in trun. Measure the "neutron port-create" and "neutron port-show" commands performance. :param network_create_args: dict, POST /v2.0/networks request options. 
:param port_create_args: dict, POST /v2.0/ports request options :param ports_per_network: int, number of ports for one network """ network = self._get_or_create_network(**(network_create_args or {})) for i in range(ports_per_network): port = self.neutron.create_port( network["id"], **(port_create_args or {})) self.neutron.get_port(port["id"]) @validation.add("restricted_parameters", param_names="name", subdict="network_create_args") @validation.add("restricted_parameters", param_names="name", subdict="port_create_args") @validation.add("number", param_name="ports_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_delete_ports", platform="openstack") class CreateAndDeletePorts(utils.NeutronBaseScenario): def run(self, network_create_args=None, port_create_args=None, ports_per_network=1): """Create and delete a port. Measure the "neutron port-create" and "neutron port-delete" commands performance. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param port_create_args: dict, POST /v2.0/ports request options :param ports_per_network: int, number of ports for one network """ network = self._get_or_create_network(**(network_create_args or {})) for i in range(ports_per_network): port = self.neutron.create_port( network["id"], **(port_create_args or {})) self.neutron.delete_port(port["id"]) @validation.add("number", param_name="ports_per_network", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_contexts", contexts=["network", "networking_agents"]) @validation.add("required_platform", platform="openstack", users=True, admin=True) @scenario.configure(context={"cleanup@openstack": ["neutron"], "networking_agents@openstack": {}, "network@openstack": {}}, name="NeutronNetworks.create_and_bind_ports", platform="openstack") class CreateAndBindPorts(utils.NeutronBaseScenario): def run(self, ports_per_network=1): """Bind a given number of ports. Measure the performance of port binding and all of its pre-requisites: * openstack network create * openstack subnet create --ip-version 4 * openstack subnet create --ip-version 6 * openstack port create * openstack port update (binding) :param ports_per_network: int, number of ports for one network """ # NOTE(bence romsics): Find a host where we can expect to bind # successfully. Look at agent types used in the gate. 
host_to_bind = None for agent in self.context["networking_agents"]: if (agent["admin_state_up"] and agent["alive"] and agent["agent_type"] in cfg.CONF.openstack.neutron_bind_l2_agent_types): host_to_bind = agent["host"] if host_to_bind is None: raise Exception( "No live agent of type(s) to bind was found: %s" % ", ".join(cfg.CONF.openstack.neutron_bind_l2_agent_types)) tenant_id = self.context["tenant"]["id"] for network in self.context["tenants"][tenant_id]["networks"]: self.neutron.create_subnet(network_id=network["id"], ip_version=4) self.neutron.create_subnet(network_id=network["id"], ip_version=6) for i in range(ports_per_network): port = self.neutron.create_port(network_id=network["id"]) # port bind needs admin role self.admin_neutron.update_port( port_id=port["id"], device_owner="compute:nova", device_id="ba805478-85ff-11e9-a2e4-2b8dea218fc8", **{"binding:host_id": host_to_bind}, ) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("external_network_exists", param_name="floating_network") @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_list_floating_ips", platform="openstack") class CreateAndListFloatingIps(utils.NeutronBaseScenario): def run(self, floating_network=None, floating_ip_args=None): """Create and list floating IPs. Measure the "neutron floating-ip-create" and "neutron floating-ip-list" commands performance. 
:param floating_network: str, external network for floating IP creation :param floating_ip_args: dict, POST /floatingips request options """ floating_ip_args = floating_ip_args or {} self.neutron.create_floatingip(floating_network=floating_network, **floating_ip_args) self.neutron.list_floatingips() @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("external_network_exists", param_name="floating_network") @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.create_and_delete_floating_ips", platform="openstack") class CreateAndDeleteFloatingIps(utils.NeutronBaseScenario): def run(self, floating_network=None, floating_ip_args=None): """Create and delete floating IPs. Measure the "neutron floating-ip-create" and "neutron floating-ip-delete" commands performance. :param floating_network: str, external network for floating IP creation :param floating_ip_args: dict, POST /floatingips request options """ floating_ip_args = floating_ip_args or {} floatingip = self.neutron.create_floatingip( floating_network=floating_network, **floating_ip_args) self.neutron.delete_floatingip(floatingip["id"]) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("external_network_exists", param_name="floating_network") @scenario.configure( context={"cleanup@openstack": ["neutron"]}, name="NeutronNetworks.associate_and_dissociate_floating_ips", platform="openstack") class AssociateAndDissociateFloatingIps(utils.NeutronBaseScenario): def run(self, floating_network=None): """Associate and dissociate floating IPs. Measure the "openstack floating ip set" and "openstack floating ip unset" commands performance. 
Because of the prerequisites for "floating ip set/unset" we also measure the performance of the following commands: * "openstack network create" * "openstack subnet create" * "openstack port create" * "openstack router create" * "openstack router set --external-gateway" * "openstack router add subnet" :param floating_network: str, external network for floating IP creation """ floating_network = self.neutron.find_network(floating_network, external=True) floating_ip = self.neutron.create_floatingip( floating_network=floating_network) private_network = self.neutron.create_network() subnet = self.neutron.create_subnet(network_id=private_network["id"]) port = self.neutron.create_port(network_id=private_network["id"]) router = self.neutron.create_router() self.neutron.add_gateway_to_router( router["id"], network_id=floating_network["id"]) self.neutron.add_interface_to_router( subnet_id=subnet["id"], router_id=router["id"]) self.neutron.associate_floatingip( floatingip_id=floating_ip["id"], port_id=port["id"]) self.neutron.dissociate_floatingip(floatingip_id=floating_ip["id"]) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(name="NeutronNetworks.list_agents", platform="openstack") class ListAgents(utils.NeutronBaseScenario): def run(self, agent_args=None): """List all neutron agents. This simple scenario tests the "neutron agent-list" command by listing all the neutron agents. 
:param agent_args: dict, POST /v2.0/agents request options """ agent_args = agent_args or {} self.neutron.list_agents(**agent_args) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_contexts", contexts=["network"]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronSubnets.delete_subnets", platform="openstack") class DeleteSubnets(utils.NeutronBaseScenario): def run(self): """Delete a subnet that belongs to each precreated network. Each runner instance picks a specific subnet from the list based on its positional location in the list of users. By doing so, we can start multiple threads with sufficient number of users created and spread delete requests across all of them, so that they hit different subnets concurrently. Concurrent execution of this scenario should help reveal any race conditions and other concurrency issues in Neutron IP allocation layer, among other things. """ tenant_id = self.context["tenant"]["id"] users = self.context["tenants"][tenant_id]["users"] number = users.index(self.context["user"]) for network in self.context["tenants"][tenant_id]["networks"]: # delete one of subnets based on the user sequential number subnet_id = network["subnets"][number] self.neutron.delete_subnet(subnet_id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,826
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/neutron/trunk.py
# Copyright 2014: Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.common import cfg from rally.task import types from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.neutron import utils as neutron_utils from rally_openstack.task.scenarios.nova import utils as nova_utils CONF = cfg.CONF """Scenarios for Neutron Trunk.""" @validation.add("number", param_name="subport_count", minval=1, integer_only=True) @validation.add("required_services", services=[consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron"]}, name="NeutronTrunks.create_and_list_trunks") class CreateAndListTrunks(neutron_utils.NeutronScenario): def run(self, network_create_args=None, subport_count=10): """Create a given number of trunks with subports and list all trunks. :param network_create_args: dict, POST /v2.0/networks request options. Deprecated. 
:param trunk_count: int, number of trunk ports :param subport_count: int, number of subports per trunk """ net = self._create_network(network_create_args or {}) ports = [self._create_port(net, {}) for _ in range(subport_count + 1)] parent, subports = ports[0], ports[1:] subport_payload = [{"port_id": p["port"]["id"], "segmentation_type": "vlan", "segmentation_id": seg_id} for seg_id, p in enumerate(subports, start=1)] trunk_payload = {"port_id": parent["port"]["id"], "sub_ports": subport_payload} trunk = self._create_trunk(trunk_payload) self._list_trunks() self._list_subports_by_trunk(trunk["trunk"]["id"]) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=(consts.Service.NOVA, consts.Service.NEUTRON)) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron", "nova"]}, name="NeutronTrunks.boot_server_with_subports", platform="openstack") class BootServerWithSubports(nova_utils.NovaScenario, neutron_utils.NeutronScenario): def run(self, image, flavor, network_create_args=None, subport_count=10): """Boot a server with subports. Returns when the server is actually booted and in "ACTIVE" state. 
:param image: image ID or instance for server creation :param flavor: int, flavor ID or instance for server creation :param network_create_args: arguments for creating network :param subport_count: number of subports for the trunk port """ kwargs = {} ports = [] network_create_args = network_create_args or {} for _ in range(subport_count + 1): net, subnet = self._create_network_and_subnets( network_create_args=network_create_args) ports.append(self._create_port( net, {"fixed_ips": [{ "subnet_id": subnet[0]["subnet"]["id"]}]})) parent, subports = ports[0], ports[1:] subport_payload = [{"port_id": p["port"]["id"], "segmentation_type": "vlan", "segmentation_id": seg_id} for seg_id, p in enumerate(subports, start=1)] trunk_payload = {"port_id": parent["port"]["id"], "sub_ports": subport_payload} self._create_trunk(trunk_payload) kwargs["nics"] = [{"port-id": parent["port"]["id"]}] self._boot_server(image, flavor, **kwargs) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=(consts.Service.NOVA, consts.Service.NEUTRON)) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron", "nova"]}, name="NeutronTrunks.boot_server_and_add_subports", platform="openstack") class BootServerAndAddSubports(nova_utils.NovaScenario, neutron_utils.NeutronScenario): def run(self, image, flavor, network_create_args=None, subport_count=10): """Boot a server and add subports. Returns when the server is actually booted and in "ACTIVE" state. 
:param image: image ID or instance for server creation :param flavor: int, flavor ID or instance for server creation :param network_create_args: arguments for creating network :param subport_count: number of subports for the trunk port """ kwargs = {} ports = [] network_create_args = network_create_args or {} for _ in range(subport_count + 1): net, subnet = self._create_network_and_subnets( network_create_args=network_create_args) ports.append(self._create_port( net, {"fixed_ips": [{ "subnet_id": subnet[0]["subnet"]["id"]}]})) parent, subports = ports[0], ports[1:] trunk_payload = {"port_id": parent["port"]["id"]} trunk = self._create_trunk(trunk_payload) kwargs["nics"] = [{"port-id": parent["port"]["id"]}] self._boot_server(image, flavor, **kwargs) for seg_id, p in enumerate(subports, start=1): subport_payload = [{"port_id": p["port"]["id"], "segmentation_type": "vlan", "segmentation_id": seg_id}] self._add_subports_to_trunk(trunk["trunk"]["id"], subport_payload) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=(consts.Service.NOVA, consts.Service.NEUTRON)) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["neutron", "nova"]}, name="NeutronTrunks.boot_server_and_batch_add_subports", platform="openstack") class BootServerAndBatchAddSubports(nova_utils.NovaScenario, neutron_utils.NeutronScenario): def run(self, image, flavor, network_create_args=None, subports_per_batch=10, batches=5): """Boot a server and add subports in batches. Returns when the server is actually booted and in "ACTIVE" state. 
:param image: image ID or instance for server creation :param flavor: int, flavor ID or instance for server creation :param network_create_args: arguments for creating network :param subports_per_batch: number of subports per batches :param batches: number of batches to create subports in """ kwargs = {} ports = [] network_create_args = network_create_args or {} for _ in range(subports_per_batch * batches + 1): net, subnet = self._create_network_and_subnets( network_create_args=network_create_args) ports.append(self._create_port( net, {"fixed_ips": [{ "subnet_id": subnet[0]["subnet"]["id"]}]})) parent, subports = ports[0], ports[1:] trunk_payload = {"port_id": parent["port"]["id"]} trunk = self._create_trunk(trunk_payload) kwargs["nics"] = [{"port-id": parent["port"]["id"]}] self._boot_server(image, flavor, **kwargs) begin = 0 for _ in range(0, batches): end = begin + subports_per_batch subport_payload = [{"port_id": p["port"]["id"], "segmentation_type": "vlan", "segmentation_id": seg_id} for seg_id, p in enumerate( subports[slice(begin, end)], start=begin + 1)] begin = begin + subports_per_batch self._add_subports_to_trunk(trunk["trunk"]["id"], subport_payload)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,827
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/identity/keystone_v2.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from rally.task import atomic from rally_openstack.common import service from rally_openstack.common.services.identity import identity from rally_openstack.common.services.identity import keystone_common @service.service("keystone", service_type="identity", version="2") class KeystoneV2Service(service.Service, keystone_common.KeystoneMixin): @atomic.action_timer("keystone_v2.create_tenant") def create_tenant(self, tenant_name=None): tenant_name = tenant_name or self.generate_random_name() return self._clients.keystone("2").tenants.create(tenant_name) @atomic.action_timer("keystone_v2.update_tenant") def update_tenant(self, tenant_id, name=None, enabled=None, description=None): """Update tenant name and description. 
:param tenant_id: Id of tenant to update :param name: tenant name to be set (if boolean True, random name will be set) :param enabled: enabled status of project :param description: tenant description to be set (if boolean True, random description will be set) """ if name is True: name = self.generate_random_name() if description is True: description = self.generate_random_name() self._clients.keystone("2").tenants.update( tenant_id, name=name, description=description, enabled=enabled) @atomic.action_timer("keystone_v2.delete_tenant") def delete_tenant(self, tenant_id): return self._clients.keystone("2").tenants.delete(tenant_id) @atomic.action_timer("keystone_v2.list_tenants") def list_tenants(self): return self._clients.keystone("2").tenants.list() @atomic.action_timer("keystone_v2.get_tenant") def get_tenant(self, tenant_id): """Get tenant.""" return self._clients.keystone("2").tenants.get(tenant_id) @atomic.action_timer("keystone_v2.create_user") def create_user(self, username=None, password=None, email=None, tenant_id=None, enabled=True): username = username or self.generate_random_name() password = password or str(uuid.uuid4()) email = email or (username + "@rally.me") return self._clients.keystone("2").users.create(name=username, password=password, email=email, tenant_id=tenant_id, enabled=enabled) @atomic.action_timer("keystone_v2.create_users") def create_users(self, tenant_id, number_of_users, user_create_args=None): """Create specified amount of users. 
:param tenant_id: Id of tenant :param number_of_users: number of users to create :param user_create_args: additional user creation arguments """ users = [] for _i in range(number_of_users): users.append(self.create_user(tenant_id=tenant_id, **(user_create_args or {}))) return users @atomic.action_timer("keystone_v2.update_user") def update_user(self, user_id, **kwargs): allowed_args = ("name", "email", "enabled") restricted = set(kwargs) - set(allowed_args) if restricted: raise NotImplementedError( "Failed to update '%s', since Keystone V2 allows to update " "only '%s'." % ("', '".join(restricted), "', '".join(allowed_args))) self._clients.keystone("2").users.update(user_id, **kwargs) @atomic.action_timer("keystone_v2.update_user_password") def update_user_password(self, user_id, password): self._clients.keystone("2").users.update_password(user_id, password=password) @atomic.action_timer("keystone_v2.create_service") def create_service(self, name=None, service_type=None, description=None): """Creates keystone service. 
:param name: name of service to create :param service_type: type of the service :param description: description of the service :returns: keystone service instance """ name = name or self.generate_random_name() service_type = service_type or "rally_test_type" description = description or self.generate_random_name() return self._clients.keystone("2").services.create( name, service_type=service_type, description=description) @atomic.action_timer("keystone_v2.create_role") def create_role(self, name=None): name = name or self.generate_random_name() return self._clients.keystone("2").roles.create(name) @atomic.action_timer("keystone_v2.add_role") def add_role(self, role_id, user_id, tenant_id): self._clients.keystone("2").roles.add_user_role( user=user_id, role=role_id, tenant=tenant_id) @atomic.action_timer("keystone_v2.list_roles") def list_roles(self): """List all roles.""" return self._clients.keystone("2").roles.list() @atomic.action_timer("keystone_v2.list_roles_for_user") def list_roles_for_user(self, user_id, tenant_id=None): return self._clients.keystone("2").roles.roles_for_user( user_id, tenant_id) @atomic.action_timer("keystone_v2.revoke_role") def revoke_role(self, role_id, user_id, tenant_id): self._clients.keystone("2").roles.remove_user_role(user=user_id, role=role_id, tenant=tenant_id) @atomic.action_timer("keystone_v2.create_ec2creds") def create_ec2credentials(self, user_id, tenant_id): """Create ec2credentials. 
:param user_id: User ID for which to create credentials :param tenant_id: Tenant ID for which to create credentials :returns: Created ec2-credentials object """ return self._clients.keystone("2").ec2.create(user_id, tenant_id=tenant_id) @service.compat_layer(KeystoneV2Service) class UnifiedKeystoneV2Service(keystone_common.UnifiedKeystoneMixin, identity.Identity): """Compatibility layer for Keystone V2.""" @staticmethod def _check_domain(domain_name): if domain_name.lower() != "default": raise NotImplementedError("Domain functionality not implemented " "in Keystone v2") @staticmethod def _unify_tenant(tenant): return identity.Project(id=tenant.id, name=tenant.name, domain_id="default") @staticmethod def _unify_user(user): return identity.User(id=user.id, name=user.name, project_id=getattr(user, "tenantId", None), domain_id="default") def create_project(self, project_name=None, domain_name="Default"): """Creates new project/tenant and return project object. :param project_name: Name of project to be created. :param domain_name: Restricted for Keystone V2. Should not be set or "Default" is expected. 
""" self._check_domain(domain_name) tenant = self._impl.create_tenant(project_name) return self._unify_tenant(tenant) def update_project(self, project_id, name=None, enabled=None, description=None): """Update project name, enabled and description :param project_id: Id of project to update :param name: project name to be set :param enabled: enabled status of project :param description: project description to be set """ self._impl.update_tenant(tenant_id=project_id, name=name, enabled=enabled, description=description) def delete_project(self, project_id): """Deletes project.""" return self._impl.delete_tenant(project_id) def list_projects(self): """List all projects.""" return [self._unify_tenant(t) for t in self._impl.list_tenants()] def get_project(self, project_id): """Get project.""" return self._unify_tenant(self._impl.get_tenant(project_id)) def create_user(self, username=None, password=None, project_id=None, domain_name="Default", enabled=True, default_role="member"): """Create user. :param username: name of user :param password: user password :param project_id: user's default project :param domain_name: Restricted for Keystone V2. Should not be set or "Default" is expected. :param enabled: whether the user is enabled. :param default_role: Restricted for Keystone V2. Should not be set or "member" is expected. """ self._check_domain(domain_name) user = self._impl.create_user(username=username, password=password, tenant_id=project_id, enabled=enabled) return self._unify_user(user) def create_users(self, tenant_id, number_of_users, user_create_args=None): """Create specified amount of users. 
:param tenant_id: Id of tenant :param number_of_users: number of users to create :param user_create_args: additional user creation arguments """ if user_create_args and "domain_name" in user_create_args: self._check_domain(user_create_args["domain_name"]) return [self._unify_user(u) for u in self._impl.create_users( tenant_id=tenant_id, number_of_users=number_of_users, user_create_args=user_create_args)] def list_users(self): """List all users.""" return [self._unify_user(u) for u in self._impl.list_users()] def update_user(self, user_id, enabled=None, name=None, email=None, password=None): if password is not None: self._impl.update_user_password(user_id=user_id, password=password) update_args = {} if enabled is not None: update_args["enabled"] = enabled if name is not None: update_args["name"] = name if email is not None: update_args["email"] = email if update_args: self._impl.update_user(user_id, **update_args) def list_services(self): """List all services.""" return [self._unify_service(s) for s in self._impl.list_services()] def create_role(self, name=None, domain_name=None): """Add role to user.""" if domain_name is not None: raise NotImplementedError("Domain functionality not implemented " "in Keystone v2") return self._unify_role(self._impl.create_role(name)) def add_role(self, role_id, user_id, project_id): """Add role to user.""" self._impl.add_role(role_id=role_id, user_id=user_id, tenant_id=project_id) def revoke_role(self, role_id, user_id, project_id): """Revokes a role from a user.""" return self._impl.revoke_role(role_id=role_id, user_id=user_id, tenant_id=project_id) def list_roles(self, user_id=None, project_id=None, domain_name=None): """List all roles.""" if domain_name: raise NotImplementedError("Domain functionality not implemented " "in Keystone v2") if user_id: roles = self._impl.list_roles_for_user(user_id, tenant_id=project_id) else: roles = self._impl.list_roles() return [self._unify_role(role) for role in roles] def 
create_ec2credentials(self, user_id, project_id): """Create ec2credentials. :param user_id: User ID for which to create credentials :param project_id: Project ID for which to create credentials :returns: Created ec2-credentials object """ return self._impl.create_ec2credentials(user_id=user_id, tenant_id=project_id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,828
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/nova/servers.py
# Copyright 2013: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import jsonschema from rally.common import logging from rally import exceptions as rally_exceptions from rally.task import types from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.cinder import utils as cinder_utils from rally_openstack.task.scenarios.neutron import utils as neutron_utils from rally_openstack.task.scenarios.nova import utils """Scenarios for Nova servers.""" LOG = logging.getLogger(__name__) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_list_server", platform="openstack") class BootAndListServer(utils.NovaScenario): def run(self, image, flavor, detailed=True, **kwargs): """Boot a server from an image and then list all servers. Measure the "nova list" command performance. If you have only 1 user in your context, you will add 1 server on every iteration. So you will have more and more servers and will be able to measure the performance of the "nova list" command depending on the number of servers owned by users. 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param detailed: True if the server listing should contain detailed information about all of them :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) msg = ("Servers isn't created") self.assertTrue(server, err_msg=msg) pool_list = self._list_servers(detailed) msg = ("Server not included into list of available servers\n" "Booted server: {}\n" "Pool of servers: {}").format(server, pool_list) self.assertIn(server, pool_list, err_msg=msg) @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(name="NovaServers.list_servers", platform="openstack") class ListServers(utils.NovaScenario): def run(self, detailed=True): """List all servers. This simple scenario test the nova list command by listing all the servers. :param detailed: True if detailed information about servers should be listed """ self._list_servers(detailed) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_delete_server", platform="openstack") class BootAndDeleteServer(utils.NovaScenario): def run(self, image, flavor, min_sleep=0, max_sleep=0, force_delete=False, **kwargs): """Boot and delete a server. Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between volume creation and deletion (of random duration from [min_sleep, max_sleep]). 
        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        # Optional pause between boot and delete to emulate real usage.
        self.sleep_between(min_sleep, max_sleep)
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.boot_and_delete_multiple_servers",
                    platform="openstack")
class BootAndDeleteMultipleServers(utils.NovaScenario):

    def run(self, image, flavor, count=2, min_sleep=0,
            max_sleep=0, force_delete=False, **kwargs):
        """Boot multiple servers in a single request and delete them.

        Deletion is done in parallel with one request per server, not
        with a single request for all servers.

        :param image: The image to boot from
        :param flavor: Flavor used to boot instance
        :param count: Number of instances to boot
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for instance creation
        """
        # One boot request for the whole batch (instances_amount=count).
        servers = self._boot_servers(image, flavor, 1, instances_amount=count,
                                     **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_servers(servers, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", validate_disk=False)
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]},
                    name="NovaServers.boot_server_from_volume_and_delete",
                    platform="openstack")
class BootServerFromVolumeAndDelete(utils.NovaScenario,
                                    cinder_utils.CinderBasic):

    def run(self, image, flavor, volume_size, volume_type=None,
            min_sleep=0, max_sleep=0, force_delete=False, **kwargs):
        """Boot a server from volume and then delete it.

        The scenario first creates a volume and then a server.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between volume creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param volume_size: volume size (in GB)
        :param volume_type: specifies volume type when there are
                            multiple backends
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        volume = self.cinder.create_volume(volume_size, imageRef=image,
                                           volume_type=volume_type)
        # Boot from the new volume via block-device mapping (no image ref).
        block_device_mapping = {"vda": "%s:::0" % volume.id}
        server = self._boot_server(None, flavor,
                                   block_device_mapping=block_device_mapping,
                                   **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.boot_and_bounce_server",
                    platform="openstack")
class BootAndBounceServer(utils.NovaScenario):

    def run(self, image, flavor, force_delete=False, actions=None, **kwargs):
        """Boot a server and run specified actions against it.

        Actions should be passed into the actions parameter. Available
        actions are 'hard_reboot', 'soft_reboot', 'stop_start',
        'rescue_unrescue', 'pause_unpause', 'suspend_resume',
        'lock_unlock' and 'shelve_unshelve'.
        Delete server after all actions were completed.
        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param actions: list of action dictionaries, where each action
                        dictionary specifies an action to be performed
                        in the following format:
                        {"action_name": <no_of_iterations>}
        :param kwargs: Optional additional arguments for server creation
        """
        action_builder = self._bind_actions()
        actions = actions or []
        # Validate the requested action list before booting anything,
        # so a bad config fails fast without creating resources.
        try:
            action_builder.validate(actions)
        except jsonschema.exceptions.ValidationError as error:
            raise rally_exceptions.InvalidConfigException(
                "Invalid server actions configuration \'%(actions)s\' due to: "
                "%(error)s" % {"actions": str(actions), "error": str(error)})
        server = self._boot_server(image, flavor, **kwargs)
        for action in action_builder.build_actions(actions, server):
            action()
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.boot_lock_unlock_and_delete",
                    platform="openstack")
class BootLockUnlockAndDelete(utils.NovaScenario):

    def run(self, image, flavor, min_sleep=0, max_sleep=0,
            force_delete=False, **kwargs):
        """Boot a server, lock it, then unlock and delete it.

        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between locking and unlocking the server (of
        random duration from min_sleep to max_sleep).

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param min_sleep: Minimum sleep time between locking and unlocking
                          in seconds
        :param max_sleep: Maximum sleep time between locking and unlocking
                          in seconds
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        self._lock_server(server)
        # The server must be unlocked again before delete can proceed.
        self.sleep_between(min_sleep, max_sleep)
        self._unlock_server(server)
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "glance"]},
                    name="NovaServers.snapshot_server",
                    platform="openstack")
class SnapshotServer(utils.NovaScenario):

    def run(self, image, flavor, force_delete=False, **kwargs):
        """Boot a server, make its snapshot and delete both.
        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        image = self._create_image(server)
        self._delete_server(server, force=force_delete)
        # Boot a second server from the freshly made snapshot image to
        # prove the snapshot is actually usable, then clean everything up.
        server = self._boot_server(image.id, flavor, **kwargs)
        self._delete_server(server, force=force_delete)
        self._delete_image(image)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.boot_server", platform="openstack")
class BootServer(utils.NovaScenario):

    def run(self, image, flavor, auto_assign_nic=False, **kwargs):
        """Boot a server.

        Assumes that cleanup is done elsewhere.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param auto_assign_nic: True if NICs should be assigned
        :param kwargs: Optional additional arguments for server creation
        """
        # No explicit delete: the cleanup@openstack context removes it.
        self._boot_server(image, flavor,
                          auto_assign_nic=auto_assign_nic, **kwargs)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", validate_disk=False)
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]},
                    name="NovaServers.boot_server_from_volume",
                    platform="openstack")
class BootServerFromVolume(utils.NovaScenario,
                           cinder_utils.CinderBasic):

    def run(self, image, flavor, volume_size, volume_type=None,
            auto_assign_nic=False, **kwargs):
        """Boot a server from volume.

        The scenario first creates a volume and then a server.
        Assumes that cleanup is done elsewhere.
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param volume_size: volume size (in GB) :param volume_type: specifies volume type when there are multiple backends :param auto_assign_nic: True if NICs should be assigned :param kwargs: Optional additional arguments for server creation """ volume = self.cinder.create_volume(volume_size, imageRef=image, volume_type=volume_type) block_device_mapping = {"vda": "%s:::0" % volume.id} self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic, block_device_mapping=block_device_mapping, **kwargs) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}, to_flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=(consts.Service.NOVA)) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.resize_server", platform="openstack") class ResizeServer(utils.NovaScenario): def run(self, image, flavor, to_flavor, force_delete=False, **kwargs): """Boot a server, then resize and delete it. This test will confirm the resize by default, or revert the resize if confirm is set to false. 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param to_flavor: flavor to be used to resize the booted instance :param force_delete: True if force_delete should be used :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self._resize(server, to_flavor) # by default we confirm confirm = kwargs.get("confirm", True) if confirm: self._resize_confirm(server) else: self._resize_revert(server) self._delete_server(server, force=force_delete) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}, to_flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.resize_shutoff_server", platform="openstack") class ResizeShutoffServer(utils.NovaScenario): def run(self, image, flavor, to_flavor, confirm=True, force_delete=False, **kwargs): """Boot a server and stop it, then resize and delete it. This test will confirm the resize by default, or revert the resize if confirm is set to false. 
        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param to_flavor: flavor to be used to resize the booted instance
        :param confirm: True if need to confirm resize else revert resize
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        self._stop_server(server)
        self._resize(server, to_flavor)
        # The server was resized while stopped, so wait for SHUTOFF
        # (not the default ACTIVE) after confirming/reverting.
        if confirm:
            self._resize_confirm(server, "SHUTOFF")
        else:
            self._resize_revert(server, "SHUTOFF")
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"},
               to_flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["cinder", "nova"]},
    name="NovaServers.boot_server_attach_created_volume_and_resize",
    platform="openstack")
class BootServerAttachCreatedVolumeAndResize(utils.NovaScenario,
                                             cinder_utils.CinderBasic):

    def run(self, image, flavor, to_flavor, volume_size, min_sleep=0,
            max_sleep=0, force_delete=False, confirm=True, do_delete=True,
            boot_server_kwargs=None, create_volume_kwargs=None):
        """Create a VM from image, attach a volume to it and resize.

        Simple test to create a VM and attach a volume, then resize the VM,
        detach the volume then delete volume and VM.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between attaching a volume and running resize
        (of random duration from range [min_sleep, max_sleep]).

        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param to_flavor: flavor to be used to resize the booted instance
        :param volume_size: volume size (in GB)
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param confirm: True if need to confirm resize else revert resize
        :param do_delete: True if resources needs to be deleted explicitly
                          else use rally cleanup to remove resources
        :param boot_server_kwargs: optional arguments for VM creation
        :param create_volume_kwargs: optional arguments for volume creation
        """
        boot_server_kwargs = boot_server_kwargs or {}
        create_volume_kwargs = create_volume_kwargs or {}
        server = self._boot_server(image, flavor, **boot_server_kwargs)
        volume = self.cinder.create_volume(volume_size,
                                           **create_volume_kwargs)
        self._attach_volume(server, volume)
        self.sleep_between(min_sleep, max_sleep)
        self._resize(server, to_flavor)
        if confirm:
            self._resize_confirm(server)
        else:
            self._resize_revert(server)
        # Explicit teardown only when requested; otherwise the
        # cleanup@openstack context removes volume and server.
        if do_delete:
            self._detach_volume(server, volume)
            self.cinder.delete_volume(volume)
            self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["cinder", "nova"]},
    name="NovaServers.boot_server_attach_created_volume_and_extend",
    platform="openstack")
class BootServerAttachCreatedVolumeAndExtend(utils.NovaScenario,
                                             cinder_utils.CinderBasic):

    def run(self, image, flavor, volume_size, new_volume_size, min_sleep=0,
            max_sleep=0, force_delete=False, do_delete=True,
            boot_server_kwargs=None, create_volume_kwargs=None):
        """Create a VM from
        image, attach a volume then extend volume

        Simple test to create a VM and attach a volume, then extend the
        volume while its running, detach the volume then delete volume
        and VM.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between attaching a volume and running resize
        (of random duration from range [min_sleep, max_sleep]).

        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param volume_size: volume size (in GB)
        :param new_volume_size: new volume size (in GB)
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param do_delete: True if resources needs to be deleted explicitly
                          else use rally cleanup to remove resources
        :param boot_server_kwargs: optional arguments for VM creation
        :param create_volume_kwargs: optional arguments for volume creation
        """
        boot_server_kwargs = boot_server_kwargs or {}
        create_volume_kwargs = create_volume_kwargs or {}
        server = self._boot_server(image, flavor, **boot_server_kwargs)
        volume = self.cinder.create_volume(volume_size,
                                           **create_volume_kwargs)
        self._attach_volume(server, volume)
        self.sleep_between(min_sleep, max_sleep)
        # Grow the volume in place while it is attached and in use.
        self.cinder.extend_volume(volume, new_size=new_volume_size)
        if do_delete:
            self._detach_volume(server, volume)
            self.cinder.delete_volume(volume)
            self._delete_server(server, force=force_delete)


@validation.add("number", param_name="volume_num", minval=1,
                integer_only=True)
@validation.add("number", param_name="volume_size", minval=1,
                integer_only=True)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", validate_disk=False)
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["cinder", "nova"]},
    name="NovaServers.boot_server_attach_volume_and_list_attachments",
    platform="openstack")
class BootServerAttachVolumeAndListAttachments(utils.NovaScenario,
                                               cinder_utils.CinderBasic):

    def run(self, image, flavor, volume_size=1, volume_num=2,
            boot_server_kwargs=None, create_volume_kwargs=None):
        """Create a VM, attach N volume to it and list server's attachment.

        Measure the "nova volume-attachments" command performance.

        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param volume_size: volume size (in GB), default 1G
        :param volume_num: the num of attached volume
        :param boot_server_kwargs: optional arguments for VM creation
        :param create_volume_kwargs: optional arguments for volume creation
        """
        boot_server_kwargs = boot_server_kwargs or {}
        create_volume_kwargs = create_volume_kwargs or {}
        server = self._boot_server(image, flavor, **boot_server_kwargs)
        attachments = []
        for i in range(volume_num):
            volume = self.cinder.create_volume(volume_size,
                                               **create_volume_kwargs)
            attachments.append(self._attach_volume(server, volume))
        list_attachments = self._list_attachments(server.id)
        # Every attachment we created must appear in the listing.
        for attachment in attachments:
            msg = ("attachment not included into list of available "
                   "attachments\n attachment: {}\n"
                   "list attachments: {}").format(attachment,
                                                  list_attachments)
            self.assertIn(attachment, list_attachments, err_msg=msg)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"},
               to_flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", validate_disk=False)
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]},
                    name="NovaServers.boot_server_from_volume_and_resize",
                    platform="openstack")
class BootServerFromVolumeAndResize(utils.NovaScenario,
                                    cinder_utils.CinderBasic):

    def run(self, image, flavor, to_flavor, volume_size, min_sleep=0,
            max_sleep=0, force_delete=False, confirm=True, do_delete=True,
            boot_server_kwargs=None, create_volume_kwargs=None):
        """Boot a server from volume, then resize and delete it.

        The scenario first creates a volume and then a server.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between volume creation and deletion
        (of random duration from [min_sleep, max_sleep]).

        This test will confirm the resize by default,
        or revert the resize if confirm is set to false.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param to_flavor: flavor to be used to resize the booted instance
        :param volume_size: volume size (in GB)
        :param min_sleep: Minimum sleep time in seconds (non-negative)
        :param max_sleep: Maximum sleep time in seconds (non-negative)
        :param force_delete: True if force_delete should be used
        :param confirm: True if need to confirm resize else revert resize
        :param do_delete: True if resources needs to be deleted explicitly
                          else use rally cleanup to remove resources
        :param boot_server_kwargs: optional arguments for VM creation
        :param create_volume_kwargs: optional arguments for volume creation
        """
        boot_server_kwargs = boot_server_kwargs or {}
        create_volume_kwargs = create_volume_kwargs or {}
        if boot_server_kwargs.get("block_device_mapping"):
            # A caller-supplied mapping would be overwritten just below.
            LOG.warning("Using already existing volume is not permitted.")
        volume = self.cinder.create_volume(volume_size, imageRef=image,
                                           **create_volume_kwargs)
        boot_server_kwargs["block_device_mapping"] = {
            "vda": "%s:::0" % volume.id}
        server = self._boot_server(None, flavor, **boot_server_kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self._resize(server, to_flavor)
        if confirm:
            self._resize_confirm(server)
        else:
            self._resize_revert(server)
        if do_delete:
            self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.suspend_and_resume_server",
                    platform="openstack")
class SuspendAndResumeServer(utils.NovaScenario):

    def run(self, image, flavor, force_delete=False, **kwargs):
        """Create a server, suspend, resume and then delete it

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        # Exercise the full suspend -> resume round trip before delete.
        self._suspend_server(server)
        self._resume_server(server)
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaServers.pause_and_unpause_server",
                    platform="openstack")
class PauseAndUnpauseServer(utils.NovaScenario):

    def run(self, image, flavor, force_delete=False, **kwargs):
        """Create a server, pause, unpause and then delete it

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param force_delete: True if force_delete should be used
        :param kwargs: Optional additional arguments for server creation
        """
        server = self._boot_server(image, flavor, **kwargs)
        # Exercise the full pause -> unpause round trip before delete.
        self._pause_server(server)
        self._unpause_server(server)
        self._delete_server(server, force=force_delete)


@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.shelve_and_unshelve_server", platform="openstack") class ShelveAndUnshelveServer(utils.NovaScenario): def run(self, image, flavor, force_delete=False, **kwargs): """Create a server, shelve, unshelve and then delete it :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param force_delete: True if force_delete should be used :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self._shelve_server(server) self._unshelve_server(server) self._delete_server(server, force=force_delete) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_live_migrate_server", platform="openstack") class BootAndLiveMigrateServer(utils.NovaScenario): def run(self, image, flavor, block_migration=False, disk_over_commit=False, min_sleep=0, max_sleep=0, **kwargs): """Live Migrate a server. This scenario launches a VM on a compute node available in the availability zone and then migrates the VM to another compute node on the same availability zone. Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between VM booting and running live migration (of random duration from range [min_sleep, max_sleep]). 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param block_migration: Specifies the migration type :param disk_over_commit: Specifies whether to allow overcommit on migrated instance or not :param min_sleep: Minimum sleep time in seconds (non-negative) :param max_sleep: Maximum sleep time in seconds (non-negative) :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self.sleep_between(min_sleep, max_sleep) self._live_migrate(server, block_migration, disk_over_commit) self._delete_server(server) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image", validate_disk=False) @validation.add("required_services", services=[consts.Service.NOVA, consts.Service.CINDER]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure( context={"cleanup@openstack": ["nova", "cinder"]}, name="NovaServers.boot_server_from_volume_and_live_migrate", platform="openstack") class BootServerFromVolumeAndLiveMigrate(utils.NovaScenario, cinder_utils.CinderBasic): def run(self, image, flavor, volume_size, volume_type=None, block_migration=False, disk_over_commit=False, force_delete=False, min_sleep=0, max_sleep=0, **kwargs): """Boot a server from volume and then migrate it. The scenario first creates a volume and a server booted from the volume on a compute node available in the availability zone and then migrates the VM to another compute node on the same availability zone. Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between VM booting and running live migration (of random duration from range [min_sleep, max_sleep]). 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param volume_size: volume size (in GB) :param volume_type: specifies volume type when there are multiple backends :param block_migration: Specifies the migration type :param disk_over_commit: Specifies whether to allow overcommit on migrated instance or not :param force_delete: True if force_delete should be used :param min_sleep: Minimum sleep time in seconds (non-negative) :param max_sleep: Maximum sleep time in seconds (non-negative) :param kwargs: Optional additional arguments for server creation """ volume = self.cinder.create_volume(volume_size, imageRef=image, volume_type=volume_type) block_device_mapping = {"vda": "%s:::0" % volume.id} server = self._boot_server(None, flavor, block_device_mapping=block_device_mapping, **kwargs) self.sleep_between(min_sleep, max_sleep) self._live_migrate(server, block_migration, disk_over_commit) self._delete_server(server, force=force_delete) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA, consts.Service.CINDER]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure( context={"cleanup@openstack": ["cinder", "nova"]}, name="NovaServers.boot_server_attach_created_volume_and_live_migrate", platform="openstack") class BootServerAttachCreatedVolumeAndLiveMigrate(utils.NovaScenario, cinder_utils.CinderBasic): def run(self, image, flavor, size, block_migration=False, disk_over_commit=False, boot_server_kwargs=None, create_volume_kwargs=None, min_sleep=0, max_sleep=0): """Create a VM, attach a volume to it and live migrate. Simple test to create a VM and attach a volume, then migrate the VM, detach the volume and delete volume/VM. 
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario to simulate a pause between attaching a volume and running live migration (of random duration from range [min_sleep, max_sleep]). :param image: Glance image name to use for the VM :param flavor: VM flavor name :param size: volume size (in GB) :param block_migration: Specifies the migration type :param disk_over_commit: Specifies whether to allow overcommit on migrated instance or not :param boot_server_kwargs: optional arguments for VM creation :param create_volume_kwargs: optional arguments for volume creation :param min_sleep: Minimum sleep time in seconds (non-negative) :param max_sleep: Maximum sleep time in seconds (non-negative) """ if boot_server_kwargs is None: boot_server_kwargs = {} if create_volume_kwargs is None: create_volume_kwargs = {} server = self._boot_server(image, flavor, **boot_server_kwargs) volume = self.cinder.create_volume(size, **create_volume_kwargs) self._attach_volume(server, volume) self.sleep_between(min_sleep, max_sleep) self._live_migrate(server, block_migration, disk_over_commit) self._detach_volume(server, volume) self.cinder.delete_volume(volume) self._delete_server(server) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_migrate_server", platform="openstack") class BootAndMigrateServer(utils.NovaScenario): def run(self, image, flavor, **kwargs): """Migrate a server. This scenario launches a VM on a compute node available in the availability zone, and then migrates the VM to another compute node on the same availability zone. 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self._migrate(server) # NOTE(wtakase): This is required because cold migration and resize # share same code path. confirm = kwargs.get("confirm", True) if confirm: self._resize_confirm(server, status="ACTIVE") else: self._resize_revert(server, status="ACTIVE") self._delete_server(server) @types.convert(from_image={"type": "glance_image"}, to_image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="from_image") @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="to_image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", admin=True, users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_rebuild_server", platform="openstack") class BootAndRebuildServer(utils.NovaScenario): def run(self, from_image, to_image, flavor, **kwargs): """Rebuild a server. This scenario launches a VM, then rebuilds that VM with a different image. 
:param from_image: image to be used to boot an instance :param to_image: image to be used to rebuild the instance :param flavor: flavor to be used to boot an instance :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(from_image, flavor, **kwargs) self._rebuild_server(server, to_image) self._delete_server(server) @logging.log_deprecated_args( "Use 'floating_network' for additional instance parameters.", "2.1.0", ["create_floating_ip_args"], once=True) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("required_contexts", contexts=["network"]) @scenario.configure( context={"cleanup@openstack": ["nova", "neutron.floatingip"]}, name="NovaServers.boot_and_associate_floating_ip", platform="openstack") class BootAndAssociateFloatingIp(utils.NovaScenario): def run(self, image, flavor, floating_network=None, create_floating_ip_args=None, **kwargs): """Boot a server and associate a floating IP to it. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param floating_network: external network associated with floating IP. :param create_floating_ip_args: Optional additional dict for specifying external network associated with floating IP ('ext_network' key). 
:param kwargs: Optional additional arguments for server creation """ if floating_network is None and create_floating_ip_args: if "ext_network" in create_floating_ip_args: # the old way (network wrapper) floating_network = create_floating_ip_args["ext_network"] elif "floating_network" in create_floating_ip_args: # the semi-old way - the time when network wrapper was replaced # by network service, but this compatibility layer was not # provided floating_network = create_floating_ip_args["floating_network"] server = self._boot_server(image, flavor, **kwargs) floatingip = self.neutron.create_floatingip( floating_network=floating_network ) self._associate_floating_ip(server, floatingip) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA, consts.Service.NEUTRON]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova", "neutron"]}, name="NovaServers.boot_server_and_attach_interface", platform="openstack") class BootServerAndAttachInterface(utils.NovaScenario, neutron_utils.NeutronScenario): def run(self, image, flavor, network_create_args=None, subnet_create_args=None, subnet_cidr_start=None, boot_server_args=None): """Create server and subnet, then attach the interface to it. This scenario measures the "nova interface-attach" command performance. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param network_create_args: dict, POST /v2.0/networks request options. 
:param subnet_create_args: dict, POST /v2.0/subnets request options :param subnet_cidr_start: str, start value for subnets CIDR :param boot_server_args: Optional additional arguments for server creation """ network = self._get_or_create_network(network_create_args) self._create_subnet(network, subnet_create_args, subnet_cidr_start) server = self._boot_server(image, flavor, **boot_server_args) self._attach_interface(server, net_id=network["network"]["id"]) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_show_server", platform="openstack") class BootAndShowServer(utils.NovaScenario): def run(self, image, flavor, **kwargs): """Show server details. This simple scenario tests the nova show command by retrieving the server details. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param kwargs: Optional additional arguments for server creation :returns: Server details """ server = self._boot_server(image, flavor, **kwargs) self._show_server(server) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_get_console_output", platform="openstack") class BootAndGetConsoleOutput(utils.NovaScenario): def run(self, image, flavor, length=None, **kwargs): """Get text console output from server. 
This simple scenario tests the nova console-log command by retrieving the text console log output. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param length: The number of tail log lines you would like to retrieve. None (default value) or -1 means unlimited length. :param kwargs: Optional additional arguments for server creation :returns: Text console log output for server """ server = self._boot_server(image, flavor, **kwargs) self._get_server_console_output(server, length) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_update_server", platform="openstack") class BootAndUpdateServer(utils.NovaScenario): def run(self, image, flavor, description=None, **kwargs): """Boot a server, then update its name and description. The scenario first creates a server, then update it. Assumes that cleanup is done elsewhere. 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param description: update the server description :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self._update_server(server, description) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA, consts.Service.CINDER]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova", "cinder"]}, name="NovaServers.boot_server_from_volume_snapshot", platform="openstack") class BootServerFromVolumeSnapshot(utils.NovaScenario, cinder_utils.CinderBasic): def run(self, image, flavor, volume_size, volume_type=None, auto_assign_nic=False, **kwargs): """Boot a server from a snapshot. The scenario first creates a volume and creates a snapshot from this volume, then boots a server from the created snapshot. Assumes that cleanup is done elsewhere. 
:param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param volume_size: volume size (in GB) :param volume_type: specifies volume type when there are multiple backends :param auto_assign_nic: True if NICs should be assigned :param kwargs: Optional additional arguments for server creation """ volume = self.cinder.create_volume(volume_size, imageRef=image, volume_type=volume_type) snapshot = self.cinder.create_snapshot(volume.id, force=False) block_device_mapping = {"vda": "%s:snap::1" % snapshot.id} self._boot_server(None, flavor, auto_assign_nic=auto_assign_nic, block_device_mapping=block_device_mapping, **kwargs) @logging.log_deprecated_args( "Use 'floating_network' for additional instance parameters.", "2.1.0", ["create_floating_ip_args"], once=True) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("required_contexts", contexts=["network"]) @scenario.configure( context={"cleanup@openstack": ["nova", "neutron.floatingip"]}, name="NovaServers.boot_server_associate_and_dissociate_floating_ip", platform="openstack") class BootServerAssociateAndDissociateFloatingIP(utils.NovaScenario): def run(self, image, flavor, floating_network=None, create_floating_ip_args=None, **kwargs): """Boot a server associate and dissociate a floating IP from it. The scenario first boot a server and create a floating IP. then associate the floating IP to the server.Finally dissociate the floating IP. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param floating_network: external network associated with floating IP. 
:param create_floating_ip_args: Optional additional dict for specifying external network associated with floating IP ('ext_network' key). :param kwargs: Optional additional arguments for server creation """ if floating_network is None and create_floating_ip_args: if "ext_network" in create_floating_ip_args: # the old way (network wrapper) floating_network = create_floating_ip_args["ext_network"] elif "floating_network" in create_floating_ip_args: # the semi-old way - the time when network wrapper was replaced # by network service, but this compatibility layer was not # provided floating_network = create_floating_ip_args["floating_network"] server = self._boot_server(image, flavor, **kwargs) floatingip = self.neutron.create_floatingip( floating_network=floating_network ) self._associate_floating_ip(server, floatingip) self._dissociate_floating_ip(server, floatingip) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @validation.add("required_contexts", contexts=["network"]) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_server_and_list_interfaces", platform="openstack") class BootServerAndListInterfaces(utils.NovaScenario): def run(self, image, flavor, **kwargs): """Boot a server and list interfaces attached to it. Measure the "nova boot" and "nova interface-list" command performance. 
:param image: ID of the image to be used for server creation :param flavor: ID of the flavor to be used for server creation :param **kwargs: Optional arguments for booting the instance """ server = self._boot_server(image, flavor, **kwargs) self._list_interfaces(server) @validation.add( "enum", param_name="console_type", values=["novnc", "xvpvnc", "spice-html5", "rdp-html5", "serial", "webmks"]) @types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"}) @validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image") @validation.add("required_services", services=[consts.Service.NOVA]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["nova"]}, name="NovaServers.boot_and_get_console_url", platform="openstack") class BootAndGetConsoleUrl(utils.NovaScenario): def run(self, image, flavor, console_type, **kwargs): """Retrieve a console url of a server. This simple scenario tests retrieving the console url of a server. :param image: image to be used to boot an instance :param flavor: flavor to be used to boot an instance :param console_type: type can be novnc/xvpvnc for protocol vnc; spice-html5 for protocol spice; rdp-html5 for protocol rdp; serial for protocol serial. webmks for protocol mks (since version 2.8). :param kwargs: Optional additional arguments for server creation """ server = self._boot_server(image, flavor, **kwargs) self._get_console_url_server(server, console_type)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,829
openstack/rally-openstack
refs/heads/master
/rally_openstack/common/services/identity/identity.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.task import service Project = service.make_resource_cls("Project", ["id", "name", "domain_id"]) User = service.make_resource_cls( "User", properties=["id", "name", "project_id", "domain_id"]) Service = service.make_resource_cls("Service", properties=["id", "name"]) Role = service.make_resource_cls("Role", properties=["id", "name"]) class Identity(service.UnifiedService): @classmethod def is_applicable(cls, clients): cloud_version = clients.keystone().version.split(".")[0][1:] return cloud_version == cls._meta_get("impl")._meta_get("version") @service.should_be_overridden def create_project(self, project_name=None, domain_name="Default"): """Creates new project/tenant and return project object. :param project_name: Name of project to be created. :param domain_name: Name or id of domain where to create project, for those service implementations that don't support domains you should use None or 'Default' value. 
""" return self._impl.create_project(project_name, domain_name=domain_name) @service.should_be_overridden def update_project(self, project_id, name=None, enabled=None, description=None): """Update project name, enabled and description :param project_id: Id of project to update :param name: project name to be set :param enabled: enabled status of project :param description: project description to be set """ self._impl.update_project(project_id, name=name, enabled=enabled, description=description) @service.should_be_overridden def delete_project(self, project_id): """Deletes project.""" return self._impl.delete_project(project_id) @service.should_be_overridden def list_projects(self): """List all projects.""" return self._impl.list_projects() @service.should_be_overridden def get_project(self, project_id): """Get project.""" return self._impl.get_project(project_id) @service.should_be_overridden def create_user(self, username=None, password=None, project_id=None, domain_name="Default", enabled=True, default_role="member"): """Create user. :param username: name of user :param password: user password :param project_id: user's default project :param domain_name: Name or id of domain where to create user, for those service implementations that don't support domains you should use None or 'Default' value. :param enabled: whether the user is enabled. :param default_role: Name of role, for implementations that don't support domains this argument must be None or 'member'. """ return self._impl.create_user(username=username, password=password, project_id=project_id, domain_name=domain_name, default_role=default_role) @service.should_be_overridden def create_users(self, owner_id, number_of_users, user_create_args=None): """Create specified amount of users. 
:param owner_id: Id of tenant/project :param number_of_users: number of users to create :param user_create_args: additional user creation arguments """ return self._impl.create_users(owner_id, number_of_users=number_of_users, user_create_args=user_create_args) @service.should_be_overridden def delete_user(self, user_id): """Deletes user by its id.""" self._impl.delete_user(user_id) @service.should_be_overridden def list_users(self): """List all users.""" return self._impl.list_users() @service.should_be_overridden def update_user(self, user_id, enabled=None, name=None, email=None, password=None): return self._impl.update_user(user_id, enabled=enabled, name=name, email=email, password=password) @service.should_be_overridden def get_user(self, user_id): """Get user.""" return self._impl.get_user(user_id) @service.should_be_overridden def create_service(self, name=None, service_type=None, description=None): """Creates keystone service with random name. :param name: name of service to create :param service_type: type of the service :param description: description of the service """ return self._impl.create_service(name=name, service_type=service_type, description=description) @service.should_be_overridden def delete_service(self, service_id): """Deletes service.""" self._impl.delete_service(service_id) @service.should_be_overridden def list_services(self): """List all services.""" return self._impl.list_services() @service.should_be_overridden def get_service(self, service_id): """Get service.""" return self._impl.get_service(service_id) @service.should_be_overridden def create_role(self, name=None, domain_name=None): """Create role with specific name :param name: role name :param domain_name: Name or id of domain where to create role, for those service implementations that don't support domains you should use None or 'Default' value. 
""" return self._impl.create_role(name=name, domain_name=domain_name) @service.should_be_overridden def add_role(self, role_id, user_id, project_id): """Add role to user.""" return self._impl.add_role(role_id=role_id, user_id=user_id, project_id=project_id) @service.should_be_overridden def delete_role(self, role_id): """Deletes role.""" self._impl.delete_role(role_id) @service.should_be_overridden def revoke_role(self, role_id, user_id, project_id): """Revokes a role from a user.""" return self._impl.revoke_role(role_id=role_id, user_id=user_id, project_id=project_id) @service.should_be_overridden def list_roles(self, user_id=None, project_id=None, domain_name=None): """List all roles. :param user_id: filter in role grants for the specified user on a resource. Domain or project must be specified. :param project_id: filter in role grants on the specified project. user_id should be specified :param domain_name: filter in role grants on the specified domain. user_id should be specified """ return self._impl.list_roles(user_id=user_id, project_id=project_id, domain_name=domain_name) @service.should_be_overridden def get_role(self, role_id): """Get role.""" return self._impl.get_role(role_id) @service.should_be_overridden def get_service_by_name(self, name): """List all services to find proper one.""" return self._impl.get_service_by_name(name) @service.should_be_overridden def create_ec2credentials(self, user_id, project_id): """Create ec2credentials. :param user_id: User ID for which to create credentials :param project_id: Project ID for which to create credentials :returns: Created ec2-credentials object """ return self._impl.create_ec2credentials(user_id=user_id, project_id=project_id) @service.should_be_overridden def list_ec2credentials(self, user_id): """List of access/secret pairs for a user_id. 
        :param user_id: List all ec2-credentials for User ID
        :returns: Return ec2-credentials list
        """
        return self._impl.list_ec2credentials(user_id)

    @service.should_be_overridden
    def delete_ec2credential(self, user_id, access):
        """Delete ec2credential.

        :param user_id: User ID for which to delete credential
        :param access: access key for ec2credential to delete
        """
        # Both keys are required: ec2 credentials are addressed by the
        # (user_id, access) pair, not by a single id.
        return self._impl.delete_ec2credential(user_id=user_id, access=access)

    @service.should_be_overridden
    def fetch_token(self):
        """Authenticate user token.

        Delegates to the version-specific implementation; returns whatever
        token representation that implementation produces.
        """
        return self._impl.fetch_token()

    @service.should_be_overridden
    def validate_token(self, token):
        """Validate user token.

        :param token: Auth token to validate
        """
        return self._impl.validate_token(token)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,830
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/network/networks.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.common.services.network import neutron
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context


LOG = logging.getLogger(__name__)


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="network", platform="openstack", order=350)
class Network(context.OpenStackContext):
    """Create networking resources.

    This creates networks for all tenants, and optionally creates
    another resources like subnets and routers.
    """

    # JSON schema validating the "network" context configuration block.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "start_cidr": {
                "type": "string"
            },
            "networks_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "subnets_per_network": {
                "type": "integer",
                "minimum": 1
            },
            "network_create_args": {
                "type": "object",
                "additionalProperties": True
            },
            "dns_nameservers": {
                "type": "array",
                "items": {"type": "string"},
                "uniqueItems": True
            },
            "dualstack": {
                "type": "boolean",
            },
            "router": {
                "type": "object",
                "properties": {
                    "external": {
                        "type": "boolean",
                        "description": "Create a new external router."
                    },
                    "enable_snat": {
                        "type": "boolean",
                        "description": "Whether to enable SNAT for a router "
                                       "if there is following extension or not"
                    },
                    "external_gateway_info": {
                        "description": "The external gateway information .",
                        "type": "object",
                        "properties": {
                            "network_id": {"type": "string"},
                            "enable_snat": {"type": "boolean"}
                        },
                        "additionalProperties": False
                    }
                },
                "additionalProperties": False
            }
        },
        "additionalProperties": False
    }

    # Defaults applied when the task file omits a key; "router": {"external":
    # True} means a router with a discovered external gateway is created.
    DEFAULT_CONFIG = {
        "start_cidr": "10.2.0.0/24",
        "networks_per_tenant": 1,
        "subnets_per_network": 1,
        "network_create_args": {},
        "router": {"external": True},
        "dualstack": False
    }

    def setup(self):
        """Create per-tenant networks/subnets/routers and record them.

        For every (user, tenant) pair this builds ``networks_per_tenant``
        network topologies and stores the resulting networks and subnets in
        ``self.context["tenants"][tenant_id]``, where scenarios look them up.
        """
        # NOTE(rkiran): Some clients are not thread-safe. Thus during
        #               multithreading/multiprocessing, it is likely the
        #               sockets are left open. This problem is eliminated by
        #               creating a connection in setup and cleanup separately.
        for user, tenant_id in self._iterate_per_tenants():
            self.context["tenants"][tenant_id]["networks"] = []
            self.context["tenants"][tenant_id]["subnets"] = []

            client = neutron.NeutronService(
                user["credential"].clients(),
                name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions()
            )
            # Copy so per-tenant mutation cannot leak into self.config.
            network_create_args = self.config["network_create_args"].copy()
            # start_cidr is IPv4-only; dualstack topologies pick their own.
            subnet_create_args = {
                "start_cidr": (self.config["start_cidr"]
                               if not self.config["dualstack"] else None)}
            if "dns_nameservers" in self.config:
                dns_nameservers = self.config["dns_nameservers"]
                subnet_create_args["dns_nameservers"] = dns_nameservers

            router_create_args = dict(self.config["router"] or {})
            if not router_create_args:
                # old behaviour - empty dict means no router create
                router_create_args = None
            elif "external" in router_create_args:
                # Translate the public "external" flag into the service-layer
                # "discover_external_gw" argument.
                external = router_create_args.pop("external")
                router_create_args["discover_external_gw"] = external

            for i in range(self.config["networks_per_tenant"]):
                net_infra = client.create_network_topology(
                    network_create_args=network_create_args,
                    subnet_create_args=subnet_create_args,
                    subnets_dualstack=self.config["dualstack"],
                    subnets_count=self.config["subnets_per_network"],
                    router_create_args=router_create_args)

                # Attach the router id (or None) to the network dict so
                # scenarios can find the router without a separate lookup.
                if net_infra["routers"]:
                    router_id = net_infra["routers"][0]["id"]
                else:
                    router_id = None
                net_infra["network"]["router_id"] = router_id
                self.context["tenants"][tenant_id]["networks"].append(
                    net_infra["network"]
                )
                self.context["tenants"][tenant_id]["subnets"].extend(
                    net_infra["subnets"]
                )

    def cleanup(self):
        """Delete all Neutron resources created by this context."""
        resource_manager.cleanup(
            names=[
                "neutron.subnet", "neutron.network", "neutron.router",
                "neutron.port"
            ],
            admin=self.context.get("admin"),
            users=self.context.get("users", []),
            task_id=self.get_owner_id(),
            superclass=self.__class__
        )
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,831
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/nova/test_flavors.py
# Copyright: 2015.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import ddt

from rally import exceptions

from rally_openstack.task.scenarios.nova import flavors
from tests.unit import test


@ddt.ddt
class NovaFlavorsTestCase(test.TestCase):
    """Unit tests for the NovaFlavors.* scenarios.

    Each test replaces the scenario's atomic-action helpers with mocks and
    verifies that ``run()`` forwards its arguments to them unchanged.
    """

    def test_list_flavors(self):
        scenario = flavors.ListFlavors()
        scenario._list_flavors = mock.Mock()
        scenario.run(detailed=True, is_public=True, limit=None, marker=None,
                     min_disk=None, min_ram=None, sort_dir=None, sort_key=None)
        scenario._list_flavors.assert_called_once_with(
            detailed=True, is_public=True, limit=None, marker=None,
            min_disk=None, min_ram=None, sort_dir=None, sort_key=None)

    def test_create_and_list_flavor_access(self):
        # Common parameters
        ram = 100
        vcpus = 1
        disk = 1

        scenario = flavors.CreateAndListFlavorAccess()
        scenario._create_flavor = mock.Mock()
        scenario._list_flavor_access = mock.Mock()

        # Positive case:
        scenario.run(
            ram, vcpus, disk, ephemeral=0, flavorid="auto",
            is_public=False, rxtx_factor=1.0, swap=0)
        scenario._create_flavor.assert_called_once_with(
            ram, vcpus, disk, ephemeral=0, flavorid="auto",
            is_public=False, rxtx_factor=1.0, swap=0)
        scenario._list_flavor_access.assert_called_once_with(
            scenario._create_flavor.return_value.id)

        # Negative case1: flavor wasn't created
        scenario._create_flavor.return_value = None
        self.assertRaises(exceptions.RallyAssertionError,
                          scenario.run, ram, vcpus, disk, ephemeral=0,
                          flavorid="auto", is_public=False, rxtx_factor=1.0,
                          swap=0)
        scenario._create_flavor.assert_called_with(
            ram, vcpus, disk, ephemeral=0, flavorid="auto",
            is_public=False, rxtx_factor=1.0, swap=0)

    def test_create_flavor_add_tenant_access(self):
        flavor = mock.MagicMock()
        context = {"user": {"tenant_id": "fake"},
                   "tenant": {"id": "fake"}}
        scenario = flavors.CreateFlavorAndAddTenantAccess()
        scenario.context = context
        scenario.generate_random_name = mock.MagicMock()
        scenario._create_flavor = mock.MagicMock(return_value=flavor)
        scenario._add_tenant_access = mock.MagicMock()

        # Positive case:
        scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto",
                     is_public=True, rxtx_factor=1.0, swap=0)
        scenario._create_flavor.assert_called_once_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)
        scenario._add_tenant_access.assert_called_once_with(flavor.id, "fake")

        # Negative case1: flavor wasn't created
        scenario._create_flavor.return_value = None
        self.assertRaises(exceptions.RallyAssertionError, scenario.run,
                          100, 1, 1, ephemeral=0, flavorid="auto",
                          is_public=True, rxtx_factor=1.0, swap=0)
        scenario._create_flavor.assert_called_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)

    def test_create_flavor(self):
        scenario = flavors.CreateFlavor()
        scenario._create_flavor = mock.MagicMock()
        scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto",
                     is_public=True, rxtx_factor=1.0, swap=0)
        scenario._create_flavor.assert_called_once_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)

    # NOTE(review): the original signature carried an unused ``**kwargs``
    # parameter; test runners never pass extra kwargs, so it was dead and
    # has been dropped.
    def test_create_and_get_flavor(self):
        scenario = flavors.CreateAndGetFlavor()
        scenario._create_flavor = mock.Mock()
        scenario._get_flavor = mock.Mock()
        scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto",
                     is_public=True, rxtx_factor=1.0, swap=0)

        scenario._create_flavor.assert_called_once_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)
        scenario._get_flavor.assert_called_once_with(
            scenario._create_flavor.return_value.id)

    def test_create_and_delete_flavor(self):
        scenario = flavors.CreateAndDeleteFlavor()
        scenario._create_flavor = mock.Mock()
        scenario._delete_flavor = mock.Mock()

        scenario.run(ram=100, vcpus=1, disk=1, ephemeral=0, flavorid="auto",
                     is_public=True, rxtx_factor=1.0, swap=0)

        scenario._create_flavor.assert_called_once_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)
        scenario._delete_flavor.assert_called_once_with(
            scenario._create_flavor.return_value.id)

    def test_create_flavor_and_set_keys(self):
        scenario = flavors.CreateFlavorAndSetKeys()
        scenario._create_flavor = mock.MagicMock()
        scenario._set_flavor_keys = mock.MagicMock()
        specs_args = {"fakeargs": "foo"}
        scenario.run(
            ram=100, vcpus=1, disk=1, extra_specs=specs_args,
            ephemeral=0, flavorid="auto", is_public=True, rxtx_factor=1.0,
            swap=0)

        scenario._create_flavor.assert_called_once_with(
            100, 1, 1, ephemeral=0, flavorid="auto",
            is_public=True, rxtx_factor=1.0, swap=0)
        scenario._set_flavor_keys.assert_called_once_with(
            scenario._create_flavor.return_value, specs_args)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,832
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/neutron/bgpvpn.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random

from rally.task import validation

from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.neutron import utils


"""Scenarios for Neutron Networking-Bgpvpn."""


def _create_random_route_target():
    # Route target in "<asn>:<value>" form: 16-bit ASN, 32-bit value.
    return "{}:{}".format(random.randint(0, 65535),
                          random.randint(0, 4294967295))


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_platform", platform="openstack", admin=True)
@validation.add("required_services", services=[consts.Service.NEUTRON])
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_and_delete_bgpvpns",
                    platform="openstack")
class CreateAndDeleteBgpvpns(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Create bgpvpn and delete the bgpvpn.

        Measure the "neutron bgpvpn-create" and neutron bgpvpn-delete
        command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type)
        self._delete_bgpvpn(bgpvpn)


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_and_list_bgpvpns",
                    platform="openstack")
class CreateAndListBgpvpns(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Create a bgpvpn and then list all bgpvpns

        Measure the "neutron bgpvpn-list" command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type)
        bgpvpns = self._list_bgpvpns()
        # Verify the freshly created BGP VPN shows up in the listing.
        self.assertIn(bgpvpn["bgpvpn"]["id"], [b["id"] for b in bgpvpns])


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_and_update_bgpvpns",
                    platform="openstack")
class CreateAndUpdateBgpvpns(utils.NeutronScenario):

    def run(self, update_name=False, route_targets=None,
            import_targets=None, export_targets=None,
            route_distinguishers=None, updated_route_targets=None,
            updated_import_targets=None, updated_export_targets=None,
            updated_route_distinguishers=None, bgpvpn_type="l3"):
        """Create and Update bgpvpns

        Measure the "neutron bgpvpn-update" command performance.

        :param update_name: bool, whether or not to modify BGP VPN name
        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param updated_route_targets: Updated Route Targets that will be both
                                      imported and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param updated_import_targets: Updated additional Route Targets that
                                       will be imported
        :param export_targets: additional Route Targets that will be used
                               for export.
        :param updated_export_targets: Updated additional Route Targets that
                                       will be used for export.
        :param route_distinguishers: list of route distinguisher strings
        :param updated_route_distinguishers: Updated list of route
                                             distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        create_bgpvpn_args = {
            "route_targets": route_targets,
            "import_targets": import_targets,
            "export_targets": export_targets,
            "route_distinguishers": route_distinguishers,
            "type": bgpvpn_type
        }
        bgpvpn = self._create_bgpvpn(**create_bgpvpn_args)
        update_bgpvpn_args = {
            "update_name": update_name,
            "route_targets": updated_route_targets,
            "import_targets": updated_import_targets,
            "export_targets": updated_export_targets,
            "route_distinguishers": updated_route_distinguishers,
        }
        self._update_bgpvpn(bgpvpn, **update_bgpvpn_args)


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@validation.add("required_contexts", contexts=["network", "servers"])
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"],
                             "cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_networks",
                    platform="openstack")
class CreateAndAssociateDissassociateNetworks(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Associate a network and disassociate it from a BGP VPN.

        Measure the "neutron bgpvpn-create", "neutron bgpvpn-net-assoc-create"
        and "neutron bgpvpn-net-assoc-delete" command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        # The "network" context (required above) guarantees at least one
        # tenant network exists; use the first one.
        networks = self.context.get("tenant", {}).get("networks", [])
        network = networks[0]
        if not route_targets:
            route_targets = _create_random_route_target()
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type,
                                     tenant_id=network["tenant_id"])
        net_asso = self._create_bgpvpn_network_assoc(bgpvpn, network)
        self._delete_bgpvpn_network_assoc(bgpvpn, net_asso)


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@validation.add("required_contexts", contexts=["network", "servers"])
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"],
                             "cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_bgpvpn_assoc_disassoc_routers",
                    platform="openstack")
class CreateAndAssociateDissassociateRouters(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Associate a router and disassociate it from a BGP VPN.

        Measure the "neutron bgpvpn-create",
        "neutron bgpvpn-router-assoc-create" and
        "neutron bgpvpn-router-assoc-delete" command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        # The "network" context stores the router id on each network dict.
        router = {
            "id": self.context["tenant"]["networks"][0]["router_id"]}
        tenant_id = self.context["tenant"]["id"]
        if not route_targets:
            route_targets = _create_random_route_target()
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type,
                                     tenant_id=tenant_id)
        router_asso = self._create_bgpvpn_router_assoc(bgpvpn, router)
        self._delete_bgpvpn_router_assoc(bgpvpn, router_asso)


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@validation.add("required_contexts", contexts=["network", "servers"])
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_and_list_networks_associations",
                    platform="openstack")
class CreateAndListNetworksAssocs(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Associate a network and list networks associations.

        Measure the "neutron bgpvpn-create", "neutron bgpvpn-net-assoc-create"
        and "neutron bgpvpn-net-assoc-list" command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        networks = self.context.get("tenant", {}).get("networks", [])
        network = networks[0]
        if not route_targets:
            route_targets = _create_random_route_target()
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type,
                                     tenant_id=network["tenant_id"])
        self._create_bgpvpn_network_assoc(bgpvpn, network)
        net_assocs = self._list_bgpvpn_network_assocs(
            bgpvpn)["network_associations"]
        # Verify the associated network is present in the listing.
        network_id = network["id"]
        msg = ("Network not included into list of associated networks\n"
               "Network created: {}\n"
               "List of associations: {}").format(network, net_assocs)
        list_networks = [net_assoc["network_id"] for net_assoc in net_assocs]
        self.assertIn(network_id, list_networks, err_msg=msg)


@validation.add("enum", param_name="bgpvpn_type", values=["l2", "l3"],
                missed=True)
@validation.add("required_neutron_extensions", extensions=["bgpvpn"])
@validation.add("required_services", services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@validation.add("required_contexts", contexts=["network", "servers"])
@scenario.configure(context={"admin_cleanup@openstack": ["neutron"]},
                    name="NeutronBGPVPN.create_and_list_routers_associations",
                    platform="openstack")
class CreateAndListRoutersAssocs(utils.NeutronScenario):

    def run(self, route_targets=None, import_targets=None,
            export_targets=None, route_distinguishers=None, bgpvpn_type="l3"):
        """Associate a router and list routers associations.

        Measure the "neutron bgpvpn-create",
        "neutron bgpvpn-router-assoc-create" and
        "neutron bgpvpn-router-assoc-list" command performance.

        :param route_targets: Route Targets that will be both imported
                              and used for export
        :param import_targets: Additional Route Targets that will be imported
        :param export_targets: Additional Route Targets that will be used
                               for export.
        :param route_distinguishers: List of route distinguisher strings
        :param bgpvpn_type: type of VPN and the technology behind it.
                            Acceptable formats: l2 and l3
        """
        router = {
            "id": self.context["tenant"]["networks"][0]["router_id"]}
        tenant_id = self.context["tenant"]["id"]
        if not route_targets:
            route_targets = _create_random_route_target()
        bgpvpn = self._create_bgpvpn(route_targets=route_targets,
                                     import_targets=import_targets,
                                     export_targets=export_targets,
                                     route_distinguishers=route_distinguishers,
                                     type=bgpvpn_type,
                                     tenant_id=tenant_id)
        self._create_bgpvpn_router_assoc(bgpvpn, router)
        router_assocs = self._list_bgpvpn_router_assocs(
            bgpvpn)["router_associations"]
        # Verify the associated router is present in the listing.
        router_id = router["id"]
        msg = ("Router not included into list of associated routers\n"
               "Router created: {}\n"
               "List of associations: {}").format(router, router_assocs)
        list_routers = [r_assoc["router_id"] for r_assoc in router_assocs]
        self.assertIn(router_id, list_routers, err_msg=msg)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,833
openstack/rally-openstack
refs/heads/master
/tests/unit/common/services/identity/test_keystone_v3.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import ddt from rally import exceptions from rally_openstack.common.services.identity import identity from rally_openstack.common.services.identity import keystone_v3 from tests.unit import test PATH = "rally_openstack.common.services.identity.keystone_v3" @ddt.ddt class KeystoneV3ServiceTestCase(test.TestCase): def setUp(self): super(KeystoneV3ServiceTestCase, self).setUp() self.clients = mock.MagicMock() self.kc = self.clients.keystone.return_value self.name_generator = mock.MagicMock() self.service = keystone_v3.KeystoneV3Service( self.clients, name_generator=self.name_generator) def test__get_domain_id_not_found(self): from keystoneclient import exceptions as kc_exceptions self.kc.domains.get.side_effect = kc_exceptions.NotFound self.kc.domains.list.return_value = [] domain_name_or_id = "some" self.assertRaises(exceptions.GetResourceNotFound, self.service._get_domain_id, domain_name_or_id) self.kc.domains.get.assert_called_once_with(domain_name_or_id) self.kc.domains.list.assert_called_once_with(name=domain_name_or_id) def test__get_domain_id_find_by_name(self): from keystoneclient import exceptions as kc_exceptions self.kc.domains.get.side_effect = kc_exceptions.NotFound domain = mock.MagicMock() self.kc.domains.list.return_value = [domain] domain_name_or_id = "some" self.assertEqual(domain.id, self.service._get_domain_id(domain_name_or_id)) 
self.kc.domains.get.assert_called_once_with(domain_name_or_id) self.kc.domains.list.assert_called_once_with(name=domain_name_or_id) def test__get_domain_id_find_by_id(self): domain = mock.MagicMock() self.kc.domains.get.return_value = domain domain_name_or_id = "some" self.assertEqual(domain.id, self.service._get_domain_id(domain_name_or_id)) self.kc.domains.get.assert_called_once_with(domain_name_or_id) self.assertFalse(self.kc.domains.list.called) @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) def test_create_project(self, mock__get_domain_id): name = "name" domain_name = "domain" domain_id = "id" mock__get_domain_id.return_value = domain_id project = self.service.create_project(name, domain_name=domain_name) mock__get_domain_id.assert_called_once_with(domain_name) self.assertEqual(project, self.kc.projects.create.return_value) self.kc.projects.create.assert_called_once_with(name=name, domain=domain_id) @ddt.data({"project_id": "fake_id", "name": True, "enabled": True, "description": True}, {"project_id": "fake_id", "name": "some", "enabled": False, "description": "descr"}) @ddt.unpack def test_update_project(self, project_id, name, enabled, description): self.service.update_project(project_id, name=name, description=description, enabled=enabled) if name is True: name = self.name_generator.return_value if description is True: description = self.name_generator.return_value self.kc.projects.update.assert_called_once_with( project_id, name=name, description=description, enabled=enabled) def test_delete_project(self): project_id = "fake_id" self.service.delete_project(project_id) self.kc.projects.delete.assert_called_once_with(project_id) def test_list_projects(self): self.assertEqual(self.kc.projects.list.return_value, self.service.list_projects()) self.kc.projects.list.assert_called_once_with() def test_get_project(self): project_id = "fake_id" self.service.get_project(project_id) self.kc.projects.get.assert_called_once_with(project_id) 
@mock.patch("%s.LOG" % PATH) @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) def test_create_user(self, mock__get_domain_id, mock_log): name = "name" password = "passwd" project_id = "project" domain_name = "domain" self.service.list_roles = mock.MagicMock(return_value=[]) user = self.service.create_user(name, password=password, project_id=project_id, domain_name=domain_name) self.assertEqual(user, self.kc.users.create.return_value) self.kc.users.create.assert_called_once_with( name=name, password=password, default_project=project_id, domain=mock__get_domain_id.return_value, enabled=True) self.assertTrue(mock_log.warning.called) @mock.patch("%s.LOG" % PATH) @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) def test_create_user_without_project_id(self, mock__get_domain_id, mock_log): name = "name" password = "passwd" domain_name = "domain" self.service.list_roles = mock.MagicMock(return_value=[]) user = self.service.create_user(name, password=password, domain_name=domain_name) self.assertEqual(user, self.kc.users.create.return_value) self.kc.users.create.assert_called_once_with( name=name, password=password, default_project=None, domain=mock__get_domain_id.return_value, enabled=True) self.assertFalse(self.service.list_roles.called) self.assertFalse(mock_log.warning.called) @mock.patch("%s.LOG" % PATH) @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) def test_create_user_and_add_role( self, mock_keystone_v3_service__get_domain_id, mock_log): mock__get_domain_id = mock_keystone_v3_service__get_domain_id name = "name" password = "passwd" project_id = "project" domain_name = "domain" class Role(object): def __init__(self, name): self.name = name self.id = str(uuid.uuid4()) self.service.list_roles = mock.MagicMock( return_value=[Role("admin"), Role("member")]) self.service.add_role = mock.MagicMock() user = self.service.create_user(name, password=password, project_id=project_id, domain_name=domain_name) self.assertEqual(user, 
self.kc.users.create.return_value) self.kc.users.create.assert_called_once_with( name=name, password=password, default_project=project_id, domain=mock__get_domain_id.return_value, enabled=True) self.assertFalse(mock_log.warning.called) self.service.add_role.assert_called_once_with( role_id=self.service.list_roles.return_value[1].id, user_id=user.id, project_id=project_id) def test_create_users(self): self.service.create_user = mock.MagicMock() n = 2 project_id = "some" self.assertEqual([self.service.create_user.return_value] * n, self.service.create_users(number_of_users=n, project_id=project_id)) self.assertEqual([mock.call(project_id=project_id)] * n, self.service.create_user.call_args_list) @ddt.data(None, "some") def test_update_user(self, domain_name): user_id = "fake_id" name = "new name" project_id = "new project" password = "pass" email = "mail" description = "n/a" enabled = False default_project = "some" self.service._get_domain_id = mock.MagicMock() self.service.update_user(user_id, name=name, domain_name=domain_name, project_id=project_id, password=password, email=email, description=description, enabled=enabled, default_project=default_project) domain = None if domain_name: self.service._get_domain_id.assert_called_once_with(domain_name) domain = self.service._get_domain_id.return_value else: self.assertFalse(self.service._get_domain_id.called) self.kc.users.update.assert_called_once_with( user_id, name=name, domain=domain, project=project_id, password=password, email=email, description=description, enabled=enabled, default_project=default_project) @ddt.data({"name": None, "service_type": None, "description": None, "enabled": True}, {"name": "some", "service_type": "st", "description": "d", "enabled": False}) @ddt.unpack def test_create_service(self, name, service_type, description, enabled): self.assertEqual(self.kc.services.create.return_value, self.service.create_service(name=name, service_type=service_type, description=description, enabled=enabled)) 
name = name or self.name_generator.return_value service_type = service_type or "rally_test_type" description = description or self.name_generator.return_value self.kc.services.create.assert_called_once_with( name, type=service_type, description=description, enabled=enabled) @mock.patch("%s.KeystoneV3Service._get_domain_id" % PATH) def test_create_role(self, mock__get_domain_id): domain_name = "domain" name = "some" user = self.service.create_role(name, domain_name=domain_name) self.assertEqual(user, self.kc.roles.create.return_value) self.kc.roles.create.assert_called_once_with( name, domain=mock__get_domain_id.return_value) @ddt.data({"domain_name": "domain", "user_id": "user", "project_id": "pr"}, {"domain_name": None, "user_id": None, "project_id": None}) @ddt.unpack def test_list_roles(self, domain_name, user_id, project_id): self.service._get_domain_id = mock.MagicMock() self.assertEqual(self.kc.roles.list.return_value, self.service.list_roles(user_id=user_id, domain_name=domain_name, project_id=project_id)) domain = None if domain_name: self.service._get_domain_id.assert_called_once_with(domain_name) domain = self.service._get_domain_id.return_value else: self.assertFalse(self.service._get_domain_id.called) self.kc.roles.list.assert_called_once_with(user=user_id, domain=domain, project=project_id) def test_add_role(self): role_id = "fake_id" user_id = "user_id" project_id = "project_id" self.service.add_role(role_id, user_id=user_id, project_id=project_id) self.kc.roles.grant.assert_called_once_with( user=user_id, role=role_id, project=project_id) def test_revoke_role(self): role_id = "fake_id" user_id = "user_id" project_id = "tenant_id" self.service.revoke_role(role_id, user_id=user_id, project_id=project_id) self.kc.roles.revoke.assert_called_once_with( user=user_id, role=role_id, project=project_id) def test_get_role(self): role_id = "fake_id" self.service.get_role(role_id) self.kc.roles.get.assert_called_once_with(role_id) def test_create_domain(self): 
name = "some_domain" descr = "descr" enabled = False self.service.create_domain(name, description=descr, enabled=enabled) self.kc.domains.create.assert_called_once_with( name, description=descr, enabled=enabled) def test_create_ec2credentials(self): user_id = "fake_id" project_id = "fake_id" self.assertEqual(self.kc.ec2.create.return_value, self.service.create_ec2credentials( user_id, project_id=project_id)) self.kc.ec2.create.assert_called_once_with(user_id, project_id=project_id) @ddt.ddt class UnifiedKeystoneV3ServiceTestCase(test.TestCase): def setUp(self): super(UnifiedKeystoneV3ServiceTestCase, self).setUp() self.clients = mock.MagicMock() self.service = keystone_v3.UnifiedKeystoneV3Service(self.clients) self.service._impl = mock.MagicMock() def test_init_identity_service(self): self.clients.keystone.return_value.version = "v3" self.assertIsInstance(identity.Identity(self.clients)._impl, keystone_v3.UnifiedKeystoneV3Service) def test__unify_project(self): class KeystoneV3Project(object): def __init__(self): self.id = str(uuid.uuid4()) self.name = str(uuid.uuid4()) self.domain_id = str(uuid.uuid4()) project = KeystoneV3Project() unified_project = self.service._unify_project(project) self.assertIsInstance(unified_project, identity.Project) self.assertEqual(project.id, unified_project.id) self.assertEqual(project.name, unified_project.name) self.assertEqual(project.domain_id, unified_project.domain_id) self.assertEqual(project.domain_id, unified_project.domain_id) def test__unify_user(self): class KeystoneV3User(object): def __init__(self, project_id=None): self.id = str(uuid.uuid4()) self.name = str(uuid.uuid4()) self.domain_id = str(uuid.uuid4()) if project_id is not None: self.default_project_id = project_id user = KeystoneV3User() unified_user = self.service._unify_user(user) self.assertIsInstance(unified_user, identity.User) self.assertEqual(user.id, unified_user.id) self.assertEqual(user.name, unified_user.name) self.assertEqual(user.domain_id, 
unified_user.domain_id) self.assertIsNone(unified_user.project_id) project_id = "tenant_id" user = KeystoneV3User(project_id=project_id) unified_user = self.service._unify_user(user) self.assertIsInstance(unified_user, identity.User) self.assertEqual(user.id, unified_user.id) self.assertEqual(user.name, unified_user.name) self.assertEqual(user.domain_id, unified_user.domain_id) self.assertEqual(project_id, unified_user.project_id) @mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) def test_create_project(self, mock_unified_keystone_v3_service__unify_project): mock_unify_project = mock_unified_keystone_v3_service__unify_project name = "name" domain = "domain" self.assertEqual(mock_unify_project.return_value, self.service.create_project(name, domain_name=domain)) mock_unify_project.assert_called_once_with( self.service._impl.create_project.return_value) self.service._impl.create_project.assert_called_once_with( name, domain_name=domain) def test_update_project(self): project_id = "fake_id" name = "name" description = "descr" enabled = False self.service.update_project(project_id=project_id, name=name, description=description, enabled=enabled) self.service._impl.update_project.assert_called_once_with( project_id=project_id, name=name, description=description, enabled=enabled) def test_delete_project(self): project_id = "fake_id" self.service.delete_project(project_id) self.service._impl.delete_project.assert_called_once_with(project_id) @mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) def test_get_project(self, mock_unified_keystone_v3_service__unify_project): mock_unify_project = mock_unified_keystone_v3_service__unify_project project_id = "id" self.assertEqual(mock_unify_project.return_value, self.service.get_project(project_id)) mock_unify_project.assert_called_once_with( self.service._impl.get_project.return_value) self.service._impl.get_project.assert_called_once_with(project_id) 
@mock.patch("%s.UnifiedKeystoneV3Service._unify_project" % PATH) def test_list_projects(self, mock_unified_keystone_v3_service__unify_project): mock_unify_project = mock_unified_keystone_v3_service__unify_project projects = [mock.MagicMock()] self.service._impl.list_projects.return_value = projects self.assertEqual([mock_unify_project.return_value], self.service.list_projects()) mock_unify_project.assert_called_once_with(projects[0]) @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) def test_create_user(self, mock_unified_keystone_v3_service__unify_user): mock_unify_user = mock_unified_keystone_v3_service__unify_user name = "name" password = "passwd" project_id = "project" domain_name = "domain" default_role = "role" self.assertEqual(mock_unify_user.return_value, self.service.create_user(name, password=password, project_id=project_id, domain_name=domain_name, default_role=default_role)) mock_unify_user.assert_called_once_with( self.service._impl.create_user.return_value) self.service._impl.create_user.assert_called_once_with( username=name, password=password, project_id=project_id, domain_name=domain_name, default_role=default_role, enabled=True) @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) def test_create_users(self, mock_unified_keystone_v3_service__unify_user): project_id = "project" n = 3 domain_name = "Default" self.service.create_users( project_id, number_of_users=3, user_create_args={"domain_name": domain_name}) self.service._impl.create_users.assert_called_once_with( project_id=project_id, number_of_users=n, user_create_args={"domain_name": domain_name}) @mock.patch("%s.UnifiedKeystoneV3Service._unify_user" % PATH) def test_list_users(self, mock_unified_keystone_v3_service__unify_user): mock_unify_user = mock_unified_keystone_v3_service__unify_user users = [mock.MagicMock()] self.service._impl.list_users.return_value = users self.assertEqual([mock_unify_user.return_value], self.service.list_users()) 
mock_unify_user.assert_called_once_with(users[0]) @ddt.data({"user_id": "id", "enabled": False, "name": "Fake", "email": "badboy@example.com", "password": "pass"}, {"user_id": "id", "enabled": None, "name": None, "email": None, "password": None}) @ddt.unpack def test_update_user(self, user_id, enabled, name, email, password): self.service.update_user(user_id, enabled=enabled, name=name, email=email, password=password) self.service._impl.update_user.assert_called_once_with( user_id, enabled=enabled, name=name, email=email, password=password) @mock.patch("%s.UnifiedKeystoneV3Service._unify_service" % PATH) def test_list_services(self, mock_unified_keystone_v3_service__unify_service): mock_unify_service = mock_unified_keystone_v3_service__unify_service services = [mock.MagicMock()] self.service._impl.list_services.return_value = services self.assertEqual([mock_unify_service.return_value], self.service.list_services()) mock_unify_service.assert_called_once_with(services[0]) @mock.patch("%s.UnifiedKeystoneV3Service._unify_role" % PATH) def test_create_role(self, mock_unified_keystone_v3_service__unify_role): mock_unify_role = mock_unified_keystone_v3_service__unify_role name = "some" domain = "some" self.assertEqual(mock_unify_role.return_value, self.service.create_role(name, domain_name=domain)) self.service._impl.create_role.assert_called_once_with( name, domain_name=domain) mock_unify_role.assert_called_once_with( self.service._impl.create_role.return_value) def test_add_role(self): role_id = "fake_id" user_id = "user_id" project_id = "user_id" self.service.add_role(role_id, user_id=user_id, project_id=project_id) self.service._impl.add_role.assert_called_once_with( user_id=user_id, role_id=role_id, project_id=project_id) def test_revoke_role(self): role_id = "fake_id" user_id = "user_id" project_id = "user_id" self.service.revoke_role(role_id, user_id=user_id, project_id=project_id) self.service._impl.revoke_role.assert_called_once_with( user_id=user_id, 
role_id=role_id, project_id=project_id) @mock.patch("%s.UnifiedKeystoneV3Service._unify_role" % PATH) def test_list_roles(self, mock_unified_keystone_v3_service__unify_role): mock_unify_role = mock_unified_keystone_v3_service__unify_role roles = [mock.MagicMock()] self.service._impl.list_roles.return_value = roles self.assertEqual([mock_unify_role.return_value], self.service.list_roles()) mock_unify_role.assert_called_once_with(roles[0]) def test_create_ec2credentials(self): user_id = "id" project_id = "project-id" self.assertEqual(self.service._impl.create_ec2credentials.return_value, self.service.create_ec2credentials( user_id=user_id, project_id=project_id)) self.service._impl.create_ec2credentials.assert_called_once_with( user_id=user_id, project_id=project_id)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,834
openstack/rally-openstack
refs/heads/master
/tests/unit/common/services/gnocchi/test_metric.py
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.common.services.gnocchi import metric from tests.unit import test class GnocchiServiceTestCase(test.TestCase): def setUp(self): super(GnocchiServiceTestCase, self).setUp() self.clients = mock.MagicMock() self.name_generator = mock.MagicMock() self.service = metric.GnocchiService( self.clients, name_generator=self.name_generator) def atomic_actions(self): return self.service._atomic_actions def test__create_archive_policy(self): definition = [{"granularity": "0:00:01", "timespan": "1:00:00"}] aggregation_methods = [ "std", "count", "95pct", "min", "max", "sum", "median", "mean"] archive_policy = {"name": "fake_name"} archive_policy["definition"] = definition archive_policy["aggregation_methods"] = aggregation_methods self.assertEqual( self.service.create_archive_policy( name="fake_name", definition=definition, aggregation_methods=aggregation_methods), self.service._clients.gnocchi().archive_policy.create( archive_policy) ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.create_archive_policy") def test__delete_archive_policy(self): self.service.delete_archive_policy("fake_name") self.service._clients.gnocchi().archive_policy.delete \ .assert_called_once_with("fake_name") self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.delete_archive_policy") def test__list_archive_policy(self): self.assertEqual( 
self.service.list_archive_policy(), self.service._clients.gnocchi().archive_policy.list.return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_archive_policy") def test__create_archive_policy_rule(self): archive_policy_rule = {"name": "fake_name"} archive_policy_rule["metric_pattern"] = "cpu_*" archive_policy_rule["archive_policy_name"] = "low" self.assertEqual( self.service.create_archive_policy_rule( name="fake_name", metric_pattern="cpu_*", archive_policy_name="low"), self.service._clients.gnocchi().archive_policy_rule.create( archive_policy_rule) ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.create_archive_policy_rule") def test__delete_archive_policy_rule(self): self.service.delete_archive_policy_rule("fake_name") self.service._clients.gnocchi().archive_policy_rule \ .delete.assert_called_once_with("fake_name") self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.delete_archive_policy_rule") def test__list_archive_policy_rule(self): self.assertEqual( self.service.list_archive_policy_rule(), self.service._clients.gnocchi().archive_policy_rule.list .return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_archive_policy_rule") def test__list_capabilities(self): self.assertEqual( self.service.list_capabilities(), self.service._clients.gnocchi().capabilities.list.return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_capabilities") def test__get_measures_aggregation(self): self.assertEqual( self.service.get_measures_aggregation( metrics=[1], aggregation="mean", refresh=False), self.service._clients.gnocchi().metric.aggregation( [1], "mean", False) ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.get_measures_aggregation") def test__get_measures(self): self.assertEqual( self.service.get_measures( metric=1, aggregation="mean", refresh=False), self.service._clients.gnocchi().metric.get_measures( 1, "mean", False) ) 
self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.get_measures") def test__create_metric(self): param = {"name": "fake_name"} param["archive_policy_name"] = "fake_archive_policy" param["unit"] = "fake_unit" param["resource_id"] = "fake_resource_id" self.assertEqual( self.service.create_metric( name="fake_name", archive_policy_name="fake_archive_policy", unit="fake_unit", resource_id="fake_resource_id"), self.service._clients.gnocchi().metric.create(param) ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.create_metric") def test__delete_metric(self): self.service.delete_metric("fake_metric_id") self.service._clients.gnocchi().metric.delete.assert_called_once_with( "fake_metric_id") self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.delete_metric") def test__list_metric(self): self.service.list_metric(limit=0) self.assertEqual( 1, self.service._clients.gnocchi().metric.list.call_count) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_metric") def test__create_resource(self): resource = {"id": "11111"} self.assertEqual( self.service.create_resource("fake_type"), self.service._clients.gnocchi().resource.create( "fake_type", resource) ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.create_resource") def test__delete_resource(self): self.service.delete_resource("fake_resource_id") self.service._clients.gnocchi().resource.delete \ .assert_called_once_with("fake_resource_id") self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.delete_resource") def test__list_resource(self): self.assertEqual( self.service.list_resource(), self.service._clients.gnocchi().resource.list.return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_resource") def test__create_resource_type(self): resource_type = {"name": "fake_name"} self.assertEqual( self.service.create_resource_type("fake_name"), self.service._clients.gnocchi().resource_type.create(resource_type) ) 
self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.create_resource_type") def test__delete_resource_type(self): self.service.delete_resource_type("fake_resource_name") self.service._clients.gnocchi().resource_type.delete \ .assert_called_once_with("fake_resource_name") self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.delete_resource_type") def test__list_resource_type(self): self.assertEqual( self.service.list_resource_type(), self.service._clients.gnocchi().resource_type.list.return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.list_resource_type") def test__get_status(self,): self.assertEqual( self.service.get_status(), self.service._clients.gnocchi().status.get.return_value ) self._test_atomic_action_timer(self.atomic_actions(), "gnocchi.get_status")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,835
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/sahara/sahara_job_binaries.py
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import requests from rally.common import validation from rally import exceptions from rally_openstack.common import consts from rally_openstack.common import osclients from rally_openstack.task.cleanup import manager as resource_manager from rally_openstack.task import context from rally_openstack.task.scenarios.sahara import utils @validation.add("required_platform", platform="openstack", users=True) @context.configure(name="sahara_job_binaries", platform="openstack", order=442) class SaharaJobBinaries(context.OpenStackContext): """Context class for setting up Job Binaries for an EDP job.""" CONFIG_SCHEMA = { "type": "object", "$schema": consts.JSON_SCHEMA, "properties": { "mains": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "download_url": { "type": "string" } }, "additionalProperties": False, "required": ["name", "download_url"] } }, "libs": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" }, "download_url": { "type": "string" } }, "additionalProperties": False, "required": ["name", "download_url"] } } }, "additionalProperties": False } # This cache will hold the downloaded libs content to prevent repeated # downloads for each tenant lib_cache = {} def setup(self): utils.init_sahara_context(self) for user, tenant_id in self._iterate_per_tenants(): clients = 
osclients.Clients(user["credential"]) sahara = clients.sahara() self.context["tenants"][tenant_id]["sahara"]["mains"] = [] self.context["tenants"][tenant_id]["sahara"]["libs"] = [] for main in self.config.get("mains", []): self.download_and_save_lib( sahara=sahara, lib_type="mains", name=main["name"], download_url=main["download_url"], tenant_id=tenant_id) for lib in self.config.get("libs", []): self.download_and_save_lib( sahara=sahara, lib_type="libs", name=lib["name"], download_url=lib["download_url"], tenant_id=tenant_id) def setup_inputs(self, sahara, tenant_id, input_type, input_url): if input_type == "swift": raise exceptions.RallyException( "Swift Data Sources are not implemented yet") # Todo(nkonovalov): Add swift credentials parameters and data upload input_ds = sahara.data_sources.create( name=self.generate_random_name(), description="", data_source_type=input_type, url=input_url) self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id def download_and_save_lib(self, sahara, lib_type, name, download_url, tenant_id): if download_url not in self.lib_cache: lib_data = requests.get(download_url).content self.lib_cache[download_url] = lib_data else: lib_data = self.lib_cache[download_url] job_binary_internal = sahara.job_binary_internals.create( name=name, data=lib_data) url = "internal-db://%s" % job_binary_internal.id job_binary = sahara.job_binaries.create(name=name, url=url, description="", extra={}) self.context["tenants"][tenant_id]["sahara"][lib_type].append( job_binary.id) def cleanup(self): resources = ["job_binary_internals", "job_binaries"] resource_manager.cleanup( names=["sahara.%s" % res for res in resources], users=self.context.get("users", []), superclass=utils.SaharaScenario, task_id=self.context["task"]["uuid"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,836
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/magnum/cluster_templates.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from rally.task import validation from rally_openstack.common import consts from rally_openstack.task import scenario from rally_openstack.task.scenarios.magnum import utils """Scenarios for Magnum cluster_templates.""" @validation.add("required_services", services=[consts.Service.MAGNUM]) @validation.add("required_platform", platform="openstack", users=True) @scenario.configure(context={"cleanup@openstack": ["magnum"]}, name="MagnumClusterTemplates.list_cluster_templates", platform="openstack") class ListClusterTemplates(utils.MagnumScenario): def run(self, **kwargs): """List all cluster_templates. Measure the "magnum cluster_template-list" command performance. :param limit: (Optional) The maximum number of results to return per request, if: 1) limit > 0, the maximum number of cluster_templates to return. 2) limit param is NOT specified (None), the number of items returned respect the maximum imposed by the Magnum API (see Magnum's api.max_limit option). :param kwargs: optional additional arguments for cluster_templates listing """ self._list_cluster_templates(**kwargs)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,837
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/neutron/test_network.py
# Copyright 2014: Intel Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally_openstack.task.scenarios.neutron import network from tests.unit import test BASE = "rally_openstack.task.scenarios.neutron.network" @ddt.ddt class NeutronNetworksTestCase(test.TestCase): def setUp(self): super(NeutronNetworksTestCase, self).setUp() patch = mock.patch("rally_openstack.common.osclients.Clients") self.clients = patch.start().return_value self.clients.credential.api_info = {} self.addCleanup(patch.stop) self.nc = self.clients.neutron.return_value self.context = self.get_test_context() @staticmethod def get_test_context(): ctx = test.get_test_context() ctx.update( user_choice_method="random", tenants={"tenant-1": {}}, users=[ { "tenant_id": "tenant-1", "credential": {} } ] ) return ctx @ddt.data( {"network_create_args": {}}, {"network_create_args": {"admin_state_up": False}}, {"network_create_args": {"provider:network_type": "vxlan"}} ) @ddt.unpack def test_create_and_list_networks(self, network_create_args): net = { "id": "network-id", "name": "network-name", "admin_state_up": False } self.nc.create_network.return_value = {"network": net} scenario = network.CreateAndListNetworks(self.context) scenario.run(network_create_args=network_create_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY, **network_create_args}} ) self.nc.list_networks.assert_called_once_with() @ddt.data( 
{"network_create_args": {}}, {"network_create_args": {"admin_state_up": False}}, ) @ddt.unpack def test_create_and_show_network(self, network_create_args): net = { "id": "network-id", "name": "network-name", "admin_state_up": False } self.nc.create_network.return_value = {"network": net} scenario = network.CreateAndShowNetwork(self.context) scenario.run(network_create_args=network_create_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY, **network_create_args}} ) self.nc.show_network.assert_called_once_with(net["id"]) def test_create_and_update_networks(self): net = { "id": "network-id", "name": "network-name", "admin_state_up": False } self.nc.create_network.return_value = {"network": net} scenario = network.CreateAndUpdateNetworks(self.context) network_update_args = {"admin_state_up": True} # Default options scenario.run(network_update_args=network_update_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY}} ) self.nc.update_network.assert_called_once_with( net["id"], {"network": network_update_args} ) self.nc.create_network.reset_mock() self.nc.update_network.reset_mock() # admin_state_up is specified network_create_args = { "admin_state_up": False } scenario.run(network_create_args=network_create_args, network_update_args=network_update_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY, **network_create_args}} ) self.nc.update_network.assert_called_once_with( net["id"], {"network": network_update_args} ) def test_create_and_delete_networks(self): net = { "id": "network-id", "name": "network-name", "admin_state_up": False } self.nc.create_network.return_value = {"network": net} scenario = network.CreateAndDeleteNetworks(self.context) # Default options network_create_args = {} scenario.run(network_create_args=network_create_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY}} ) self.assertTrue(self.nc.delete_network.called) 
self.nc.create_network.reset_mock() self.nc.delete_network.reset_mock() # Explicit network name is specified network_create_args = {"admin_state_up": True} scenario.run(network_create_args=network_create_args) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY, **network_create_args}} ) self.assertTrue(self.nc.delete_network.called) def test_create_and_list_subnets(self): network_create_args = {"router:external": True} subnet_create_args = {"allocation_pools": []} subnet_cidr_start = "10.2.0.0/24" subnets_per_network = 5 net = mock.MagicMock() self.nc.create_network.return_value = {"network": net} self.nc.create_subnet.side_effect = [ {"subnet": {"id": i}} for i in range(subnets_per_network) ] scenario = network.CreateAndListSubnets(self.context) scenario.run(network_create_args=network_create_args, subnet_create_args=subnet_create_args, subnet_cidr_start=subnet_cidr_start, subnets_per_network=subnets_per_network) self.nc.create_network.assert_called_once_with( {"network": {"name": mock.ANY, **network_create_args}} ) self.assertEqual( [mock.call({"subnet": { "name": mock.ANY, "network_id": net["id"], "dns_nameservers": ["8.8.8.8", "8.8.4.4"], "ip_version": 4, "cidr": mock.ANY, **subnet_create_args}} )] * subnets_per_network, self.nc.create_subnet.call_args_list ) self.nc.list_subnets.assert_called_once_with() def test_create_and_show_subnets(self): network_create_args = {"router:external": True} subnet_create_args = {"allocation_pools": []} subnet_cidr_start = "1.1.0.0/30" subnets_per_network = 5 net = mock.MagicMock() self.nc.create_subnet.side_effect = [ {"subnet": {"id": i}} for i in range(subnets_per_network) ] scenario = network.CreateAndShowSubnets(self.context) scenario._get_or_create_network = mock.Mock(return_value=net) scenario.run(network_create_args=network_create_args, subnet_create_args=subnet_create_args, subnet_cidr_start=subnet_cidr_start, subnets_per_network=subnets_per_network) 
        # Tail of the preceding subnet test (its ``def`` starts before this
        # chunk): verify network lookup, subnet creation and show calls.
        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call(i) for i in range(subnets_per_network)],
            self.nc.show_subnet.call_args_list
        )

    def test_set_and_clear_router_gateway(self):
        """A gateway is set on the new router and removed afterwards."""
        network_create_args = {"router:external": True}
        router_create_args = {"admin_state_up": True}
        enable_snat = True
        ext_net = {"id": "ext-net-1"}
        router = {"id": "router-id"}
        self.nc.create_network.return_value = {"network": ext_net}
        self.nc.create_router.return_value = {"router": router}
        # the scenario checks the "ext-gw-mode" extension before passing
        # enable_snat to the API, so the extension must be advertised here
        self.nc.list_extensions.return_value = {
            "extensions": [{"alias": "ext-gw-mode"}]
        }

        network.SetAndClearRouterGateway(self.context).run(
            enable_snat, network_create_args, router_create_args
        )

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.nc.create_router.assert_called_once_with(
            {"router": {"name": mock.ANY, **router_create_args}}
        )
        self.nc.add_gateway_router.assert_called_once_with(
            router["id"],
            {"network_id": ext_net["id"], "enable_snat": enable_snat}
        )
        self.nc.remove_gateway_router.assert_called_once_with(router["id"])

    def test_create_and_update_subnets(self):
        """Each created subnet is updated with the given update args."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_update_args = {"enable_dhcp": True}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        net = mock.MagicMock()
        self.nc.create_network.return_value = {"network": net}
        # subnet ids 0..N-1 so update calls can be checked positionally
        self.nc.create_subnet.side_effect = [
            {"subnet": {"id": i}} for i in range(subnets_per_network)
        ]
        scenario = network.CreateAndUpdateSubnets(self.context)

        scenario.run(subnet_update_args,
                     network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network)

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call(s, {"subnet": subnet_update_args})
             for s in range(subnets_per_network)],
            self.nc.update_subnet.call_args_list
        )

    def test_create_and_delete_subnets(self):
        """Each created subnet is deleted again by id."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        net = mock.MagicMock()
        self.nc.create_subnet.side_effect = [
            {"subnet": {"id": i}} for i in range(subnets_per_network)
        ]
        scenario = network.CreateAndDeleteSubnets(self.context)
        scenario._get_or_create_network = mock.Mock(return_value=net)

        scenario.run(network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network)

        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call(s) for s in range(subnets_per_network)],
            self.nc.delete_subnet.call_args_list
        )

    def test_create_and_list_routers(self):
        """One router per subnet is created, then routers are listed."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        router_create_args = {"admin_state_up": True}
        net = {"id": "foo"}
        self.nc.create_network.return_value = {"network": net}
        scenario = network.CreateAndListRouters(self.context)

        scenario.run(network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network,
                     router_create_args=router_create_args)

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call({"router": {
                "name": mock.ANY,
                **router_create_args}}
            )] * subnets_per_network,
            self.nc.create_router.call_args_list
        )
        self.nc.list_routers.assert_called_once_with()

    def test_create_and_update_routers(self):
        """Every created router is updated with the given update args."""
        router_update_args = {"admin_state_up": False}
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        router_create_args = {"admin_state_up": True}
        net = {"id": "foo"}
        self.nc.create_network.return_value = {"network": net}
        self.nc.create_subnet.side_effect = [
            {"subnet": {"id": i}} for i in range(subnets_per_network)
        ]
        self.nc.create_router.side_effect = [
            {"router": {"id": i}} for i in range(subnets_per_network)
        ]
        scenario = network.CreateAndUpdateRouters(self.context)

        scenario.run(router_update_args,
                     network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network,
                     router_create_args=router_create_args)

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call({"router": {
                "name": mock.ANY,
                **router_create_args}}
            )] * subnets_per_network,
            self.nc.create_router.call_args_list
        )
        self.assertEqual(
            [mock.call(i, {"router": router_update_args})
             for i in range(subnets_per_network)],
            self.nc.update_router.call_args_list
        )

    def test_create_and_delete_routers(self):
        """Router interfaces are detached and routers deleted pairwise."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        router_create_args = {"admin_state_up": True}
        net = {"id": "foo"}
        self.nc.create_network.return_value = {"network": net}
        # distinct id prefixes so the subnet/router pairing is visible
        self.nc.create_subnet.side_effect = [
            {"subnet": {"id": f"s-{i}"}} for i in range(subnets_per_network)
        ]
        self.nc.create_router.side_effect = [
            {"router": {"id": f"r-{i}"}} for i in range(subnets_per_network)
        ]
        scenario = network.CreateAndDeleteRouters(self.context)

        scenario.run(network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network,
                     router_create_args=router_create_args)

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call({"router": {
                "name": mock.ANY,
                **router_create_args}}
            )] * subnets_per_network,
            self.nc.create_router.call_args_list
        )
        self.assertEqual(
            [mock.call(f"r-{i}", {"subnet_id": f"s-{i}"})
             for i in range(subnets_per_network)],
            self.nc.remove_interface_router.call_args_list
        )
        self.assertEqual(
            [mock.call(f"r-{i}") for i in range(subnets_per_network)],
            self.nc.delete_router.call_args_list
        )

    def test_create_and_show_routers(self):
        """Every created router is fetched back via show_router."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "1.1.0.0/30"
        subnets_per_network = 5
        router_create_args = {"admin_state_up": True}
        net = {"id": "foo"}
        self.nc.create_network.return_value = {"network": net}
        self.nc.create_subnet.side_effect = [
            {"subnet": {"id": i}} for i in range(subnets_per_network)
        ]
        self.nc.create_router.side_effect = [
            {"router": {"id": i}} for i in range(subnets_per_network)
        ]
        scenario = network.CreateAndShowRouters(self.context)

        scenario.run(network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     subnets_per_network=subnets_per_network,
                     router_create_args=router_create_args)

        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY, **network_create_args}}
        )
        self.assertEqual(
            [mock.call({"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY,
                **subnet_create_args}}
            )] * subnets_per_network,
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [mock.call({"router": {
                "name": mock.ANY,
                **router_create_args}}
            )] * subnets_per_network,
            self.nc.create_router.call_args_list
        )
        self.assertEqual(
            [mock.call(i) for i in range(subnets_per_network)],
            self.nc.show_router.call_args_list
        )

    def test_list_agents(self):
        """Agent filters are forwarded verbatim to list_agents."""
        agent_args = {
            "F": "id",
            "sort-dir": "asc"
        }
        scenario = network.ListAgents(self.context)

        scenario.run(agent_args=agent_args)

        self.nc.list_agents.assert_called_once_with(**agent_args)

    def test_create_and_list_ports(self):
        """N ports are created on one network, then ports are listed."""
        port_create_args = {"allocation_pools": []}
        ports_per_network = 10
        network_create_args = {"router:external": True}
        net = mock.MagicMock()
        scenario = network.CreateAndListPorts(self.context)
        scenario._get_or_create_network = mock.Mock(return_value=net)

        scenario.run(network_create_args=network_create_args,
                     port_create_args=port_create_args,
                     ports_per_network=ports_per_network)

        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.assertEqual(
            [
                mock.call({
                    "port": {
                        "network_id": net["id"],
                        "name": mock.ANY,
                        **port_create_args
                    }
                })
                for _ in range(ports_per_network)
            ],
            self.nc.create_port.call_args_list
        )
        self.nc.list_ports.assert_called_once_with()

    def test_create_and_update_ports(self):
        """Every created port is updated with the given update args."""
        port_update_args = {"admin_state_up": False}
        port_create_args = {"allocation_pools": []}
        ports_per_network = 10
        network_create_args = {"router:external": True}
        net = mock.MagicMock()
        self.nc.create_port.side_effect = [
            {"port": {"id": f"p-{i}"}} for i in range(ports_per_network)
        ]
        scenario = network.CreateAndUpdatePorts(self.context)
        scenario._get_or_create_network = mock.Mock(return_value=net)

        scenario.run(port_update_args,
                     network_create_args=network_create_args,
                     port_create_args=port_create_args,
                     ports_per_network=ports_per_network)

        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.assertEqual(
            [mock.call({"port": {
                "network_id": net["id"],
                "name": mock.ANY,
                **port_create_args}}
            )] * ports_per_network,
            self.nc.create_port.call_args_list
        )
        self.assertEqual(
            [mock.call(f"p-{i}", {"port": port_update_args})
             for i in range(ports_per_network)],
            self.nc.update_port.call_args_list
        )

    def test_create_and_bind_ports(self):
        """Ports are created and bound to a host via the admin client."""
        ports_per_network = 2
        # expected arguments of the admin-side bind call
        port_update_args = {
            "device_owner": "compute:nova",
            "device_id": "ba805478-85ff-11e9-a2e4-2b8dea218fc8",
            "binding:host_id": "fake-host",
        }
        net = {"id": "net-id"}
        self.context.update({
            "tenants": {
                "tenant-1": {
                    "id": "tenant-1",
                    "networks": [
                        net
                    ],
                },
            },
            # the scenario binds to a live, enabled OVS agent host
            "networking_agents": [{
                "host": "fake-host",
                "alive": True,
                "admin_state_up": True,
                "agent_type": "Open vSwitch agent",
            }],
        })
        scenario = network.CreateAndBindPorts(self.context)
        scenario.admin_neutron = mock.MagicMock()
        self.nc.create_port.side_effect = [
            {"port": {"id": f"p-{i}"}} for i in range(ports_per_network)
        ]

        scenario.run(ports_per_network=ports_per_network)

        self.assertEqual(
            [mock.call({"port": {
                "network_id": net["id"],
                "name": mock.ANY}}
            )] * ports_per_network,
            self.nc.create_port.call_args_list
        )
        self.assertEqual(
            [mock.call(port_id=f"p-{i}",
                       **port_update_args)
             for i in range(ports_per_network)],
            scenario.admin_neutron.update_port.call_args_list
        )

    def test_create_and_show_ports(self):
        """A created port is fetched back via show_port."""
        port_create_args = {"allocation_pools": []}
        ports_per_network = 1
        network_create_args = {"router:external": True}
        net = mock.MagicMock()
        scenario = network.CreateAndShowPorts(self.context)
        scenario._get_or_create_network = mock.MagicMock(return_value=net)
        port = {"id": 1, "name": "f"}
        self.nc.create_port.return_value = {"port": port}

        scenario.run(network_create_args=network_create_args,
                     port_create_args=port_create_args,
                     ports_per_network=ports_per_network)

        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.nc.create_port.assert_called_with({"port": {
            "network_id": net["id"],
            "name": mock.ANY,
            **port_create_args
        }})
        self.nc.show_port.assert_called_with(port["id"])

    def test_create_and_delete_ports(self):
        """Every created port is deleted again by id."""
        port_create_args = {"allocation_pools": []}
        ports_per_network = 10
        network_create_args = {"router:external": True}
        net = mock.MagicMock()
        self.nc.create_port.side_effect = [
            {"port": {"id": f"p-{i}"}} for i in range(ports_per_network)
        ]
        scenario = network.CreateAndDeletePorts(self.context)
        scenario._get_or_create_network = mock.Mock(return_value=net)

        scenario.run(network_create_args=network_create_args,
                     port_create_args=port_create_args,
                     ports_per_network=ports_per_network)

        scenario._get_or_create_network.assert_called_once_with(
            **network_create_args)
        self.assertEqual(
            [mock.call({"port": {
                "network_id": net["id"],
                "name": mock.ANY,
                **port_create_args}}
            )] * ports_per_network,
            self.nc.create_port.call_args_list
        )
        self.assertEqual(
            [mock.call(f"p-{i}") for i in range(ports_per_network)],
            self.nc.delete_port.call_args_list
        )

    @ddt.data(
        {},
        {"floating_ip_args": {"floating_ip_address": "1.1.1.1"}},
    )
    @ddt.unpack
    def test_create_and_list_floating_ips(self, floating_ip_args=None):
        """A floating IP is created (optionally with a fixed address)
        and the floating IPs are listed."""
        floating_ip_args = floating_ip_args or {}
        floating_network = {"id": "ext-net"}
        scenario = network.CreateAndListFloatingIps(self.context)

        self.nc.create_floatingip.return_value = {"floatingip": mock.Mock()}
        self.nc.list_floatingips.return_value = {"floatingips": mock.Mock()}
        scenario.run(floating_network=floating_network,
                     floating_ip_args=floating_ip_args)
        self.nc.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": floating_network["id"],
                            **floating_ip_args}})
        self.nc.list_floatingips.assert_called_once_with()

    @ddt.data(
        {},
        {"floating_ip_args": {"floating_ip_address": "1.1.1.1"}},
    )
    @ddt.unpack
    def test_create_and_delete_floating_ips(self, floating_ip_args=None):
        """A floating IP is created and then deleted by id."""
        floating_network = {"id": "ext-net"}
        floating_ip_args = floating_ip_args or {}
        floatingip = {"id": "floating-ip-id"}
        self.nc.create_floatingip.return_value = {"floatingip": floatingip}
        scenario = network.CreateAndDeleteFloatingIps(self.context)

        scenario.run(floating_network=floating_network,
                     floating_ip_args=floating_ip_args)
        self.nc.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": floating_network["id"],
                            **floating_ip_args}})
        self.nc.delete_floatingip.assert_called_once_with(floatingip["id"])

    def test_associate_and_dissociate_floating_ips(self):
        """A floating IP is associated to a port and then dissociated."""
        floating_network = {
            "id": "floating-net-id",
            "name": "public",
            "router:external": True
        }
        floatingip = {"id": "floating-ip-id"}
        net = {"id": "net-id"}
        subnet = {"id": "subnet-id"}
        port = {"id": "port-id"}
        router = {"id": "router-id"}
        self.nc.create_floatingip.return_value = {"floatingip": floatingip}
        self.nc.create_network.return_value = {"network": net}
        self.nc.create_subnet.return_value = {"subnet": subnet}
        self.nc.create_port.return_value = {"port": port}
        self.nc.create_router.return_value = {"router": router}
        # the scenario resolves the floating network by name via listing
        self.nc.list_networks.return_value = {"networks": [floating_network]}

        network.AssociateAndDissociateFloatingIps(self.context).run(
            floating_network=floating_network["name"])

        self.nc.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": floating_network["id"]}})
        self.nc.create_network.assert_called_once_with(
            {"network": {"name": mock.ANY}}
        )
        self.nc.create_subnet.assert_called_once_with(
            {"subnet": {
                "name": mock.ANY,
                "network_id": net["id"],
                "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                "ip_version": 4,
                "cidr": mock.ANY
            }}
        )
        self.nc.create_port.assert_called_once_with(
            {"port": {"name": mock.ANY, "network_id": net["id"]}}
        )
        self.nc.add_gateway_router.assert_called_once_with(
            router["id"], {"network_id": floating_network["id"]}
        )
        self.nc.add_interface_router.assert_called_once_with(
            router["id"], {"subnet_id": subnet["id"]}
        )
        # associate (port set) followed by dissociate (port cleared)
        self.assertEqual(
            [
                mock.call(
                    floatingip["id"],
                    {"floatingip": {"port_id": port["id"]}}
                ),
                mock.call(
                    floatingip["id"],
                    {"floatingip": {"port_id": None}}
                )
            ],
            self.nc.update_floatingip.call_args_list
        )

    def test_delete_subnets(self):
        """The iteration-th subnet of each network of the selected tenant
        is removed; user selection is pinned via round_robin."""
        # do not guess what user will be used
        self.context["user_choice_method"] = "round_robin"
        # if it is the 4th iteration, the second user from the second tenant
        # should be taken, which means that the second subnets from each
        # tenant network should be removed.
        self.context["iteration"] = 4
        # in case of `round_robin` the user will be selected from the list of
        # available users of particular tenant, not from the list of all
        # tenants (i.e random choice). BUT to trigger selecting user and
        # tenant `users` key should present in context dict
        self.context["users"] = []

        self.context["tenants"] = {
            # this should not be used
            "uuid-1": {
                "id": "uuid-1",
                "networks": [{"subnets": ["subnet-1"]}],
                "users": [{"id": "user-1", "credential": mock.MagicMock()},
                          {"id": "user-2", "credential": mock.MagicMock()}]
            },
            # this is expected user
            "uuid-2": {
                "id": "uuid-2",
                "networks": [
                    {"subnets": ["subnet-2", "subnet-3"]},
                    {"subnets": ["subnet-4", "subnet-5"]}],
                "users": [{"id": "user-3", "credential": mock.MagicMock()},
                          {"id": "user-4", "credential": mock.MagicMock()}]
            }
        }

        scenario = network.DeleteSubnets(self.context)
        self.assertEqual("user-4", scenario.context["user"]["id"],
                         "Unexpected user is taken. The wrong subnets can be "
                         "affected(removed).")

        scenario.run()

        self.assertEqual(
            [
                mock.call("subnet-3"),
                mock.call("subnet-5")
            ],
            self.nc.delete_subnet.call_args_list)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,838
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/scenarios/senlin/utils.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import cfg
from rally import exceptions
from rally.task import atomic
from rally.task import utils

from rally_openstack.task import scenario


CONF = cfg.CONF


class SenlinScenario(scenario.OpenStackScenario):
    """Base class for Senlin scenarios with basic atomic actions."""

    @atomic.action_timer("senlin.list_clusters")
    def _list_clusters(self, **queries):
        """Return the clusters visible to the admin user.

        :param queries: optional filters forwarded verbatim to the Senlin
            API, e.g. ``name``, ``status``, ``sort``, ``limit``,
            ``marker`` or ``global_project``.
        :returns: list of clusters matching the query
        """
        cluster_iter = self.admin_clients("senlin").clusters(**queries)
        return [cluster for cluster in cluster_iter]

    @atomic.action_timer("senlin.create_cluster")
    def _create_cluster(self, profile_id, desired_capacity=0, min_size=0,
                        max_size=-1, timeout=60, metadata=None):
        """Create a cluster and wait until it becomes ACTIVE.

        :param profile_id: ID of the profile the cluster is built from
        :param desired_capacity: initial number of nodes in the cluster
        :param min_size: lower bound on the cluster size
        :param max_size: upper bound on the cluster size; -1 means no limit
        :param timeout: cluster-creation timeout, in minutes
        :param metadata: key/value pairs to associate with the cluster
        :returns: the created cluster object, already in ACTIVE state
        """
        new_cluster = self.admin_clients("senlin").create_cluster(
            profile_id=profile_id,
            name=self.generate_random_name(),
            desired_capacity=desired_capacity,
            min_size=min_size,
            max_size=max_size,
            metadata=metadata,
            timeout=timeout,
        )
        # Block until Senlin reports the cluster as usable (or failed).
        return utils.wait_for_status(
            new_cluster,
            ready_statuses=["ACTIVE"],
            failure_statuses=["ERROR"],
            update_resource=self._get_cluster,
            timeout=CONF.openstack.senlin_action_timeout)

    def _get_cluster(self, cluster):
        """Refresh a cluster object from the Senlin API.

        :param cluster: cluster whose current state should be fetched
        :returns: the refreshed cluster object
        :raises GetResourceNotFound: if the cluster no longer exists
        :raises GetResourceFailure: for any other API error
        """
        try:
            return self.admin_clients("senlin").get_cluster(cluster.id)
        except Exception as err:
            # Client exceptions expose the HTTP status under different
            # attribute names; default to 400 when neither is present.
            status = getattr(err, "code", getattr(err, "http_status", 400))
            if status == 404:
                raise exceptions.GetResourceNotFound(resource=cluster.id)
            raise exceptions.GetResourceFailure(resource=cluster.id, err=err)

    @atomic.action_timer("senlin.delete_cluster")
    def _delete_cluster(self, cluster):
        """Delete the given cluster and wait until it is gone.

        :param cluster: cluster object to delete
        """
        self.admin_clients("senlin").delete_cluster(cluster)
        utils.wait_for_status(
            cluster,
            ready_statuses=["DELETED"],
            failure_statuses=["ERROR"],
            check_deletion=True,
            update_resource=self._get_cluster,
            timeout=CONF.openstack.senlin_action_timeout)

    @atomic.action_timer("senlin.create_profile")
    def _create_profile(self, spec, metadata=None):
        """Create a profile from the given spec.

        :param spec: spec dictionary used to create the profile
        :param metadata: optional key/value pairs to associate with the
            profile
        :returns: the created profile object
        """
        attrs = {
            "spec": spec,
            "name": self.generate_random_name(),
        }
        if metadata:
            attrs["metadata"] = metadata
        return self.clients("senlin").create_profile(**attrs)

    @atomic.action_timer("senlin.delete_profile")
    def _delete_profile(self, profile):
        """Remove the given profile.

        Returns once the delete request has been issued.

        :param profile: profile object to be deleted
        """
        self.clients("senlin").delete_profile(profile)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,839
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/vm/test_custom_image.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally.task import context

from rally_openstack.task.contexts.vm import custom_image
from tests.unit import test

# Module path used to build mock.patch targets for collaborators of the
# custom_image context module.
BASE = "rally_openstack.task.contexts.vm.custom_image"


@context.configure(name="test_custom_image", order=500)
class FakeImageGenerator(custom_image.BaseCustomImageGenerator):
    # Minimal concrete generator: image customization is a no-op so that
    # the base-class workflow can be exercised in isolation.
    def _customize_image(self, *args):
        pass


class BaseCustomImageContextVMTestCase(test.TestCase):

    def setUp(self):
        """Build a context with one admin and three single-user tenants."""
        super(BaseCustomImageContextVMTestCase, self).setUp()

        self.context = test.get_test_context()
        self.context.update({
            "config": {
                "test_custom_image": {
                    "image": {"name": "image"},
                    "flavor": {"name": "flavor"},
                    "username": "fedora",
                    "floating_network": "floating",
                    "port": 1022,
                }
            },
            "admin": {
                "credential": mock.Mock(),
            },
            "users": [
                {"tenant_id": "tenant_id0"},
                {"tenant_id": "tenant_id1"},
                {"tenant_id": "tenant_id2"}
            ],
            "tenants": {
                "tenant_id0": {},
                "tenant_id1": {},
                "tenant_id2": {}
            }
        })

    @mock.patch("%s.osclients.Clients" % BASE)
    @mock.patch("%s.types.GlanceImage" % BASE)
    @mock.patch("%s.types.Flavor" % BASE)
    @mock.patch("%s.vmtasks.BootRuncommandDelete" % BASE)
    def test_create_one_image(
            self, mock_boot_runcommand_delete, mock_flavor,
            mock_glance_image, mock_clients):
        """create_one_image boots a server, customizes it, snapshots it
        and tears the server down again."""
        mock_flavor.return_value.pre_process.return_value = "flavor"
        mock_glance_image.return_value.pre_process.return_value = "image"
        ip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True}
        fake_server = mock.Mock()
        fake_image = {"id": "image"}

        scenario = mock_boot_runcommand_delete.return_value = mock.MagicMock(
            _create_image=mock.MagicMock(return_value=fake_image),
            _boot_server_with_fip=mock.MagicMock(
                return_value=(fake_server, ip))
        )

        generator_ctx = FakeImageGenerator(self.context)
        generator_ctx._customize_image = mock.MagicMock()

        user = {
            "credential": "credential",
            "keypair": {"name": "keypair_name"},
            "secgroup": {"name": "secgroup_name"}
        }

        # NOTE(review): this local deliberately shadows the imported
        # `custom_image` module for the rest of the method.
        custom_image = generator_ctx.create_one_image(user,
                                                      foo_arg="foo_value")
        self.assertEqual({"id": "image"}, custom_image)

        mock_flavor.assert_called_once_with(self.context)
        mock_flavor.return_value.pre_process.assert_called_once_with(
            resource_spec={"name": "flavor"}, config={})
        mock_glance_image.assert_called_once_with(self.context)
        mock_glance_image.return_value.pre_process.assert_called_once_with(
            resource_spec={"name": "image"}, config={})

        mock_boot_runcommand_delete.assert_called_once_with(
            self.context, clients=mock_clients.return_value)
        scenario._boot_server_with_fip.assert_called_once_with(
            image="image", flavor="flavor",
            floating_network="floating",
            key_name="keypair_name", security_groups=["secgroup_name"],
            userdata=None, foo_arg="foo_value")
        scenario._stop_server.assert_called_once_with(fake_server)
        generator_ctx._customize_image.assert_called_once_with(
            fake_server, ip, user)
        scenario._create_image.assert_called_once_with(fake_server)
        scenario._delete_server_with_fip.assert_called_once_with(
            fake_server, ip)

    @mock.patch("%s.image.Image" % BASE)
    def test_delete_one_image(self, mock_image):
        """delete_one_image removes the snapshot through the image service."""
        generator_ctx = FakeImageGenerator(self.context)

        credential = mock.Mock()
        user = {"credential": credential,
                "keypair": {"name": "keypair_name"}}
        custom_image = mock.Mock(id="image")

        generator_ctx.delete_one_image(user, custom_image)

        mock_image.return_value.delete_image.assert_called_once_with("image")

    @mock.patch("%s.image.Image" % BASE)
    def test_setup_admin(self, mock_image):
        """With an admin present, one shared image is created and made
        visible to all tenants."""
        self.context["tenants"]["tenant_id0"]["networks"] = [
            {"id": "network_id"}]
        generator_ctx = FakeImageGenerator(self.context)
        image = mock.Mock(id="custom_image")
        generator_ctx.create_one_image = mock.Mock(return_value=image)

        generator_ctx.setup()

        mock_image.return_value.set_visibility.assert_called_once_with(
            image.id)
        generator_ctx.create_one_image.assert_called_once_with(
            self.context["users"][0], nics=[{"net-id": "network_id"}])

    def test_cleanup_admin(self):
        """With an admin present, only the single shared image is deleted."""
        tenant = self.context["tenants"]["tenant_id0"]
        custom_image = tenant["custom_image"] = {"id": "image"}

        generator_ctx = FakeImageGenerator(self.context)
        generator_ctx.delete_one_image = mock.Mock()

        generator_ctx.cleanup()

        generator_ctx.delete_one_image.assert_called_once_with(
            self.context["users"][0], custom_image)

    def test_setup(self):
        """Without an admin, one image is created per tenant and stored
        in that tenant's context entry."""
        self.context.pop("admin")

        generator_ctx = FakeImageGenerator(self.context)
        generator_ctx.create_one_image = mock.Mock(
            side_effect=["custom_image0", "custom_image1", "custom_image2"])

        generator_ctx.setup()

        self.assertEqual(
            [mock.call(user) for user in self.context["users"]],
            generator_ctx.create_one_image.mock_calls)
        for i in range(3):
            self.assertEqual(
                "custom_image%d" % i,
                self.context["tenants"]["tenant_id%d" % i]["custom_image"]
            )

    def test_cleanup(self):
        """Without an admin, each tenant's own image is deleted."""
        self.context.pop("admin")

        for i in range(3):
            self.context["tenants"]["tenant_id%d" % i]["custom_image"] = {
                "id": "custom_image%d" % i}

        generator_ctx = FakeImageGenerator(self.context)
        generator_ctx.delete_one_image = mock.Mock()

        generator_ctx.cleanup()

        self.assertEqual(
            [mock.call(self.context["users"][i],
                       {"id": "custom_image%d" % i}) for i in range(3)],
            generator_ctx.delete_one_image.mock_calls)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,840
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/swift/test_utils.py
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

import ddt

from rally_openstack.task.scenarios.swift import utils
from tests.unit import test

SWIFT_UTILS = "rally_openstack.task.scenarios.swift.utils"


@ddt.ddt
class SwiftScenarioTestCase(test.ScenarioTestCase):

    def test__list_containers(self):
        """_list_containers forwards kwargs and forces full_listing."""
        headers_dict = mock.MagicMock()
        containers_list = mock.MagicMock()
        self.clients("swift").get_account.return_value = (headers_dict,
                                                          containers_list)
        scenario = utils.SwiftScenario(context=self.context)

        self.assertEqual((headers_dict, containers_list),
                         scenario._list_containers(fargs="f"))
        kw = {"full_listing": True, "fargs": "f"}
        self.clients("swift").get_account.assert_called_once_with(**kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.list_containers")

    @ddt.data(
        {},
        {"headers": {"X-fake-name": "fake-value"}},
        {"public": False,
         "headers": {"X-fake-name": "fake-value"}},
        {"public": False})
    @ddt.unpack
    def test__create_container(self, public=True, kwargs=None, headers=None):
        """_create_container names the container and sets read ACL headers."""
        if kwargs is None:
            kwargs = {"fakearg": "fake"}
        if headers is None:
            headers = {}
        scenario = utils.SwiftScenario(self.context)
        scenario.generate_random_name = mock.MagicMock()

        container = scenario._create_container(public=public,
                                               headers=headers,
                                               **kwargs)
        self.assertEqual(container,
                         scenario.generate_random_name.return_value)
        # NOTE(review): the public-read ACL header is expected regardless
        # of the `public` flag here — confirm against the scenario code.
        kwargs["headers"] = headers
        kwargs["headers"]["X-Container-Read"] = ".r:*,.rlistings"
        self.clients("swift").put_container.assert_called_once_with(
            scenario.generate_random_name.return_value,
            **kwargs)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.create_container")

    def test__delete_container(self):
        """_delete_container forwards the container name and kwargs."""
        container_name = mock.MagicMock()
        scenario = utils.SwiftScenario(context=self.context)
        scenario._delete_container(container_name, fargs="f")

        kw = {"fargs": "f"}
        self.clients("swift").delete_container.assert_called_once_with(
            container_name, **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.delete_container")

    def test__list_objects(self):
        """_list_objects forwards kwargs and forces full_listing."""
        container_name = mock.MagicMock()
        headers_dict = mock.MagicMock()
        objects_list = mock.MagicMock()
        self.clients("swift").get_container.return_value = (headers_dict,
                                                            objects_list)
        scenario = utils.SwiftScenario(context=self.context)

        self.assertEqual((headers_dict, objects_list),
                         scenario._list_objects(container_name, fargs="f"))
        kw = {"full_listing": True, "fargs": "f"}
        self.clients("swift").get_container.assert_called_once_with(
            container_name, **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.list_objects")

    def test__upload_object(self):
        """_upload_object generates one object name and returns the etag."""
        container_name = mock.MagicMock()
        content = mock.MagicMock()
        etag = mock.MagicMock()
        self.clients("swift").put_object.return_value = etag
        scenario = utils.SwiftScenario(self.context)
        scenario.generate_random_name = mock.MagicMock()

        # reset: constructing the scenario must not count as an upload
        self.clients("swift").put_object.reset_mock()
        self.assertEqual((etag, scenario.generate_random_name.return_value),
                         scenario._upload_object(container_name, content,
                                                 fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").put_object.assert_called_once_with(
            container_name, scenario.generate_random_name.return_value,
            content, **kw)
        self.assertEqual(1, scenario.generate_random_name.call_count)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.upload_object")

    def test__download_object(self):
        """_download_object returns the (headers, content) tuple."""
        container_name = mock.MagicMock()
        object_name = mock.MagicMock()
        headers_dict = mock.MagicMock()
        content = mock.MagicMock()
        self.clients("swift").get_object.return_value = (headers_dict,
                                                         content)
        scenario = utils.SwiftScenario(context=self.context)

        self.assertEqual((headers_dict, content),
                         scenario._download_object(container_name,
                                                   object_name, fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").get_object.assert_called_once_with(
            container_name, object_name, **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.download_object")

    def test__delete_object(self):
        """_delete_object forwards container, object name and kwargs."""
        container_name = mock.MagicMock()
        object_name = mock.MagicMock()
        scenario = utils.SwiftScenario(context=self.context)
        scenario._delete_object(container_name, object_name, fargs="f")

        kw = {"fargs": "f"}
        self.clients("swift").delete_object.assert_called_once_with(
            container_name, object_name, **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.delete_object")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,841
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/barbican/test_secrets.py
# Copyright 2018 Red Hat Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from rally_openstack.task.scenarios.barbican import secrets from tests.unit import fakes from tests.unit import test class BarbicanSecretsTestCase(test.ScenarioTestCase): def get_test_context(self): context = super(BarbicanSecretsTestCase, self).get_test_context() context.update({ "admin": { "user_id": "fake", "credential": mock.MagicMock() }, "user": { "user_id": "fake", "credential": mock.MagicMock() }, "tenant": {"id": "fake"} }) return context def setUp(self): super(BarbicanSecretsTestCase, self).setUp() m = "rally_openstack.common.services.key_manager.barbican" patch = mock.patch("%s.BarbicanService" % m) self.addCleanup(patch.stop) self.mock_secrets = patch.start() def test_list_secrets(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsList(self.context) scenario.run() secrets_service.list_secrets.assert_called_once_with() def test_create_secret(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsCreate(self.context) scenario.run() secrets_service.create_secret.assert_called_once_with() def test_create_and_delete_secret(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsCreateAndDelete(self.context) scenario.run() secrets_service.create_secret.assert_called_once_with() self.assertEqual(1, secrets_service.delete_secret.call_count) def 
test_create_and_get_secret(self): secrets_service = self.mock_secrets.return_value fake_secret = fakes.FakeSecret(id=1, name="secretxxx") secrets_service.create_secret.return_value = fake_secret fake_secret_info = fakes.FakeSecret(id=1, name="secret1xxx") secrets_service.get_secret.return_value = fake_secret_info scenario = secrets.BarbicanSecretsCreateAndGet(self.context) scenario.run() secrets_service.create_secret.assert_called_once_with() def test_get_secret(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsGet(self.context) scenario.run() secrets_service.create_secret.assert_called_once_with() def test_get_secret_with_secret(self): secret = mock.Mock() secret.secret_ref = mock.Mock() secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsGet(self.context) scenario.run() self.assertEqual(1, secrets_service.get_secret.call_count) def test_create_and_list_secret(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsCreateAndList(self.context) scenario.run() secrets_service.create_secret.assert_called_once_with() secrets_service.list_secrets.assert_called_once_with() def test_create_and_delete_symmetric_secret(self): secrets_service = self.mock_secrets.return_value scenario = secrets.BarbicanSecretsCreateSymmetricAndDelete( self.context) scenario.run( payload="rally_data", algorithm="aes", bit_length=256, mode="cbc") self.assertEqual(1, secrets_service.create_secret.call_count)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,842
openstack/rally-openstack
refs/heads/master
/tests/ci/rally_verify.py
#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import collections import gzip import json import logging import os import re import subprocess import sys import uuid import jinja2 from rally import api from rally.env import env_mgr from rally_openstack.common import consts from rally_openstack.common import credential LOG = logging.getLogger("verify-job") LOG.setLevel(logging.DEBUG) # NOTE(andreykurilin): this variable is used to generate output file names # with prefix ${CALL_COUNT}_ . _call_count = 0 class Status(object): PASS = "success" ERROR = "error" SKIPPED = "skip" FAILURE = "fail" class Step(object): COMMAND = None DEPENDS_ON = None CALL_ARGS = {} BASE_DIR = "rally-verify" HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n" "<a href=\"%(output_file)s\">%(doc)s</a>\n" "<code>$ %(cmd)s</code>") def __init__(self, args, rapi): self.args = args self.rapi = rapi self.result = {"status": Status.PASS, "doc": self.__doc__, "cmd": "None command found"} @property def name(self): return " ".join(re.findall("[A-Z][^A-Z]*", self.__class__.__name__)).lower() def check(self, results): """Check weather this step should be executed or skipped.""" if self.DEPENDS_ON is not None: if results[self.DEPENDS_ON].result["status"] in ( Status.PASS, Status.FAILURE): return True else: self.result["status"] = Status.SKIPPED msg = ("Step '%s' is skipped, since depends on step '%s' is " "skipped or finished with an error." 
% (self.name, results[self.DEPENDS_ON].name)) stdout_file = self._generate_path( "%s.txt" % self.__class__.__name__) self.result["output_file"] = self._write_file( stdout_file, msg, compress=False) return False return True def setUp(self): """Obtain variables required for execution""" pass def run(self): """Execute step. The default action - execute the command""" self.setUp() cmd = "rally --rally-debug %s" % (self.COMMAND % self.CALL_ARGS) self.result["cmd"] = cmd self.result["status"], self.result["output"] = self.call_rally(cmd) stdout_file = self._generate_path("%s.txt" % cmd) self.result["output_file"] = self._write_file( stdout_file, self.result["output"], compress=False) @classmethod def _generate_path(cls, root): global _call_count _call_count += 1 root = root.replace("<", "").replace(">", "").replace("/", "_") parts = ["%s" % _call_count] for path in root.split(" "): if path.startswith(cls.BASE_DIR): path = path[len(cls.BASE_DIR) + 1:] parts.append(path) return os.path.join(cls.BASE_DIR, "_".join(parts)) @classmethod def _write_file(cls, path, data, compress=False): """Create a file and write some data to it.""" if compress: with gzip.open(path, "w") as f: if not isinstance(data, bytes): data = data.encode() f.write(data) else: with open(path, "w") as f: f.write(data) return path @staticmethod def call_rally(command): """Execute a Rally verify command.""" try: LOG.info("Start `%s` command." % command) stdout = subprocess.check_output(command.split(), stderr=subprocess.STDOUT).decode() except subprocess.CalledProcessError as e: LOG.error("Command `%s` failed." 
% command) return Status.ERROR, e.output.decode() else: return Status.PASS, stdout def to_html(self): if self.result["status"] == Status.SKIPPED: return "" else: return self.HTML_TEMPLATE % self.result class SetUpStep(Step): """Validate deployment, create required resources and directories.""" ENV_NAME = "tempest" def run(self): if not os.path.exists("%s/extra" % self.BASE_DIR): os.makedirs("%s/extra" % self.BASE_DIR) # ensure that environment exit and check it env = env_mgr.EnvManager.get(self.ENV_NAME) for p_name, status in env.check_health().items(): if not status["available"]: self.result["status"] = Status.ERROR return try: subprocess.check_call( ["rally", "env", "use", "--env", self.ENV_NAME], stdout=sys.stdout) except subprocess.CalledProcessError: self.result["status"] = Status.ERROR return openstack_platform = env.data["platforms"]["openstack"] admin_creds = credential.OpenStackCredential( permission=consts.EndpointPermission.ADMIN, **openstack_platform["platform_data"]["admin"]) clients = admin_creds.clients() if self.args.ctx_create_resources: # If the 'ctx-create-resources' arg is provided, delete images and # flavors, and also create a shared network to make Tempest context # create needed resources. LOG.info("The 'ctx-create-resources' arg is provided. 
Deleting " "images and flavors, and also creating a shared network " "to make Tempest context create needed resources.") LOG.info("Deleting images.") for image in clients.glance().images.list(): clients.glance().images.delete(image.id) LOG.info("Deleting flavors.") for flavor in clients.nova().flavors.list(): clients.nova().flavors.delete(flavor.id) LOG.info("Creating a shared network.") net_body = { "network": { "name": "shared-net-%s" % str(uuid.uuid4()), "tenant_id": clients.keystone.auth_ref.project_id, "shared": True } } clients.neutron().create_network(net_body) else: # Otherwise, just in case create only flavors with the following # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make # Tempest context discover them. LOG.info("The 'ctx-create-resources' arg is not provided. " "Creating flavors to make Tempest context discover them.") for flv_ram in [64, 128]: params = { "name": "flavor-%s" % str(uuid.uuid4()), "ram": flv_ram, "vcpus": 1, "disk": 0 } LOG.info("Creating flavor '%s' with the following properties: " "RAM = %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram)) clients.nova().flavors.create(**params) def to_html(self): return "" class ListPlugins(Step): """List plugins for verifiers management.""" COMMAND = "verify list-plugins" DEPENDS_ON = SetUpStep class CreateVerifier(Step): """Create a Tempest verifier.""" COMMAND = ("verify create-verifier --type %(type)s --name %(name)s " "--source %(source)s") DEPENDS_ON = ListPlugins CALL_ARGS = {"type": "tempest", "name": "my-verifier", "source": "https://opendev.org/openstack/tempest"} class ShowVerifier(Step): """Show information about the created verifier.""" COMMAND = "verify show-verifier" DEPENDS_ON = CreateVerifier class ListVerifiers(Step): """List all installed verifiers.""" COMMAND = "verify list-verifiers" DEPENDS_ON = CreateVerifier class UpdateVerifier(Step): """Switch the verifier to the penultimate version.""" COMMAND = "verify update-verifier --version %(version)s 
--update-venv" DEPENDS_ON = CreateVerifier def setUp(self): """Obtain penultimate verifier commit for downgrading to it""" verifier_id = self.rapi.verifier.list()[0]["uuid"] verifications_dir = os.path.join( os.path.expanduser("~"), ".rally/verification/verifier-%s/repo" % verifier_id) # Get the penultimate verifier commit ID p_commit_id = subprocess.check_output( ["git", "log", "-n", "1", "--pretty=format:%H"], cwd=verifications_dir).decode().strip() self.CALL_ARGS = {"version": p_commit_id} class ConfigureVerifier(Step): """Generate and show the verifier config file.""" COMMAND = "verify configure-verifier --show" DEPENDS_ON = CreateVerifier class ExtendVerifier(Step): """Extend verifier with keystone integration tests.""" COMMAND = "verify add-verifier-ext --source %(source)s" DEPENDS_ON = CreateVerifier CALL_ARGS = {"source": "https://opendev.org/openstack/" "keystone-tempest-plugin"} class ListVerifierExtensions(Step): """List all extensions of verifier.""" COMMAND = "verify list-verifier-exts" DEPENDS_ON = ExtendVerifier class ListVerifierTests(Step): """List all tests of specific verifier.""" COMMAND = "verify list-verifier-tests" DEPENDS_ON = CreateVerifier class RunVerification(Step): """Run a verification.""" DEPENDS_ON = ConfigureVerifier COMMAND = ("verify start --pattern set=%(set)s --skip-list %(skip_tests)s " "--xfail-list %(xfail_tests)s --tag %(tag)s %(set)s-set " "--detailed") SKIP_TESTS = { "tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON." 
"test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]": "This test was skipped intentionally", } XFAIL_TESTS = { "tempest.scenario.test_dashboard_basic_ops" ".TestDashboardBasicOps.test_basic_scenario" "[dashboard,id-4f8851b1-0e69-482b-b63b-84c6e76f6c80,smoke]": "Fails for unknown reason", } def setUp(self): self.CALL_ARGS["tag"] = "tag-1 tag-2" self.CALL_ARGS["set"] = "full" if self.args.mode == "full" else "smoke" # Start a verification, show results and generate reports skip_tests = json.dumps(self.SKIP_TESTS) xfail_tests = json.dumps(self.XFAIL_TESTS) self.CALL_ARGS["skip_tests"] = self._write_file( self._generate_path("skip-list.json"), skip_tests) self.CALL_ARGS["xfail_tests"] = self._write_file( self._generate_path("xfail-list.json"), xfail_tests) def run(self): super(RunVerification, self).run() if "Success: 0" in self.result["output"]: self.result["status"] = Status.FAILURE class ReRunVerification(RunVerification): """Re-Run previous verification.""" COMMAND = "verify rerun --tag one-more-attempt" class ShowVerification(Step): """Show results of verification.""" COMMAND = "verify show" DEPENDS_ON = RunVerification class ShowSecondVerification(ShowVerification): """Show results of verification.""" DEPENDS_ON = ReRunVerification class ShowDetailedVerification(Step): """Show detailed results of verification.""" COMMAND = "verify show --detailed" DEPENDS_ON = RunVerification class ShowDetailedSecondVerification(ShowDetailedVerification): """Show detailed results of verification.""" DEPENDS_ON = ReRunVerification class ReportVerificationMixin(Step): """Mixin for obtaining reports of verifications.""" COMMAND = "verify report --uuid %(uuids)s --type %(type)s --to %(out)s" HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n" "<a href=\"%(out)s\">%(doc)s</a> " "[<a href=\"%(output_file)s\">Output from CLI</a>]\n" "<code>$ %(cmd)s</code>") def setUp(self): self.CALL_ARGS["out"] = "<path>" self.CALL_ARGS["uuids"] = "<uuid-1> <uuid-2>" cmd = 
self.COMMAND % self.CALL_ARGS report = "%s.%s" % (cmd.replace("/", "_").replace(" ", "_"), self.CALL_ARGS["type"]) print(report) self.CALL_ARGS["out"] = self._generate_path(report) self.CALL_ARGS["uuids"] = " ".join( [v["uuid"] for v in self.rapi.verification.list()]) print(self.COMMAND % self.CALL_ARGS) self.result["out"] = "<None>" class HtmlVerificationReport(ReportVerificationMixin): """Generate HTML report for verification(s).""" CALL_ARGS = {"type": "html-static"} DEPENDS_ON = RunVerification def setUp(self): super(HtmlVerificationReport, self).setUp() self.CALL_ARGS["out"] = self.CALL_ARGS["out"][:-7] class JsonVerificationReport(ReportVerificationMixin): """Generate JSON report for verification(s).""" CALL_ARGS = {"type": "json"} DEPENDS_ON = RunVerification class JunitVerificationReport(ReportVerificationMixin): """Generate JUNIT report for verification(s).""" CALL_ARGS = {"type": "junit-xml"} DEPENDS_ON = RunVerification class ListVerifications(Step): """List all verifications.""" COMMAND = "verify list" DEPENDS_ON = CreateVerifier class DeleteVerifierExtension(Step): """Delete keystone extension.""" COMMAND = "verify delete-verifier-ext --name %(name)s" CALL_ARGS = {"name": "keystone_tests"} DEPENDS_ON = ExtendVerifier class DeleteVerifier(Step): """Delete only Tempest verifier. all verifications will be delete when destroy deployment. 
""" COMMAND = "verify delete-verifier --id %(id)s --force" CALL_ARGS = {"id": CreateVerifier.CALL_ARGS["name"]} DEPENDS_ON = CreateVerifier class DestroyDeployment(Step): """Delete the deployment, and verifications of this deployment.""" COMMAND = "deployment destroy --deployment %(id)s" CALL_ARGS = {"id": SetUpStep.ENV_NAME} DEPENDS_ON = SetUpStep def run(args): steps = [SetUpStep, ListPlugins, CreateVerifier, ShowVerifier, ListVerifiers, UpdateVerifier, ConfigureVerifier, ExtendVerifier, ListVerifierExtensions, ListVerifierTests, RunVerification, ShowVerification, ShowDetailedVerification, HtmlVerificationReport, JsonVerificationReport, JunitVerificationReport, ListVerifications, DeleteVerifierExtension, DestroyDeployment, DeleteVerifier] if args.compare: # need to launch one more verification place_to_insert = steps.index(ShowDetailedVerification) + 1 # insert steps in reverse order to be able to use the same index steps.insert(place_to_insert, ShowDetailedSecondVerification) steps.insert(place_to_insert, ShowSecondVerification) steps.insert(place_to_insert, ReRunVerification) results = collections.OrderedDict() rapi = api.API() for step_cls in steps: step = step_cls(args, rapi=rapi) if step.check(results): step.run() results[step_cls] = step return results.values() def create_report(results): template_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pages") loader = jinja2.FileSystemLoader(template_dir) env = jinja2.Environment(loader=loader) template = env.get_template("verify-index.html") with open(os.path.join(Step.BASE_DIR, "extra/index.html"), "w") as f: f.write(template.render(steps=results)) def main(): parser = argparse.ArgumentParser(description="Launch rally-verify job.") parser.add_argument("--mode", type=str, default="light", help="Mode of job. The 'full' mode corresponds to the " "full set of verifier tests. 
The 'light' mode " "corresponds to the smoke set of verifier tests.", choices=["light", "full"]) parser.add_argument("--compare", action="store_true", help="Start the second verification to generate a " "trends report for two verifications.") # TODO(ylobankov): Remove hard-coded Tempest related things and make it # configurable. parser.add_argument("--ctx-create-resources", action="store_true", help="Make Tempest context create needed resources " "for the tests.") args = parser.parse_args() steps = run(args) results = [step.to_html() for step in steps] create_report(results) if len([None for step in steps if step.result["status"] == Status.PASS]) == len(steps): return 0 return 1 if __name__ == "__main__": sys.exit(main())
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,843
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/manila/test_shares.py
# Copyright 2015 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally import exceptions from rally_openstack.task.scenarios.manila import shares from tests.unit import test @ddt.ddt class ManilaSharesTestCase(test.ScenarioTestCase): @ddt.data( {"share_proto": "nfs", "size": 3}, {"share_proto": "cifs", "size": 4, "share_network": "foo", "share_type": "bar"}, ) def test_create_and_delete_share(self, params): fake_share = mock.MagicMock() scenario = shares.CreateAndDeleteShare(self.context) scenario._create_share = mock.MagicMock(return_value=fake_share) scenario.sleep_between = mock.MagicMock() scenario._delete_share = mock.MagicMock() scenario.run(min_sleep=3, max_sleep=4, **params) scenario._create_share.assert_called_once_with(**params) scenario.sleep_between.assert_called_once_with(3, 4) scenario._delete_share.assert_called_once_with(fake_share) def create_env(self, scenario): fake_share = mock.MagicMock() scenario = shares.CreateShareAndAccessFromVM(self.context) self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True} scenario._boot_server_with_fip = mock.Mock( return_value=("foo_server", self.ip)) scenario._delete_server_with_fip = mock.Mock() scenario._run_command = mock.MagicMock( return_value=(0, "{\"foo\": 42}", "foo_err")) scenario.add_output = mock.Mock() self.context.update({"user": {"keypair": {"name": "keypair_name"}, "credential": mock.MagicMock()}}) scenario._create_share = 
mock.MagicMock(return_value=fake_share) scenario._delete_share = mock.MagicMock() scenario._export_location = mock.MagicMock(return_value="fake") scenario._allow_access_share = mock.MagicMock() return scenario, fake_share @ddt.data( {"image": "some_image", "flavor": "m1.small", "username": "chuck norris"} ) @mock.patch("rally.task.utils.get_from_manager") @mock.patch("rally.task.utils.wait_for_status") def test_create_share_and_access_from_vm( self, params, mock_rally_task_utils_wait_for_status, mock_rally_task_utils_get_from_manager): scenario, fake_share = self.create_env( shares.CreateShareAndAccessFromVM(self.context)) scenario.run(**params) scenario._create_share.assert_called_once_with( share_proto="nfs", size=1) scenario._delete_share.assert_called_once_with(fake_share) scenario._allow_access_share.assert_called_once_with( fake_share, "ip", "foo_ip", "rw") scenario._export_location.assert_called_once_with(fake_share) scenario._boot_server_with_fip.assert_called_once_with( "some_image", "m1.small", use_floating_ip=True, floating_network=None, key_name="keypair_name", userdata="#cloud-config\npackages:\n - nfs-common") mock_rally_task_utils_wait_for_status.assert_called_once_with( "foo_server", ready_statuses=["ACTIVE"], update_resource=mock.ANY) scenario._delete_server_with_fip.assert_called_once_with( "foo_server", {"id": "foo_id", "ip": "foo_ip", "is_floating": True}, force_delete=False) scenario.add_output.assert_called_with( complete={"chart_plugin": "TextArea", "data": [ "foo_err"], "title": "Script StdErr"}) @ddt.data( {"image": "some_image", "flavor": "m1.small", "username": "chuck norris"} ) @mock.patch("rally.task.utils.get_from_manager") @mock.patch("rally.task.utils.wait_for_status") def test_create_share_and_access_from_vm_command_timeout( self, params, mock_rally_task_utils_wait_for_status, mock_rally_task_utils_get_from_manager): scenario, fake_share = self.create_env( shares.CreateShareAndAccessFromVM(self.context)) 
scenario._run_command.side_effect = exceptions.SSHTimeout() self.assertRaises(exceptions.SSHTimeout, scenario.run, "foo_flavor", "foo_image", "foo_interpreter", "foo_script", "foo_username") scenario._delete_server_with_fip.assert_called_once_with( "foo_server", self.ip, force_delete=False) self.assertFalse(scenario.add_output.called) scenario._delete_share.assert_called_once_with(fake_share) @ddt.data( {"image": "some_image", "flavor": "m1.small", "username": "chuck norris"} ) @mock.patch("rally.task.utils.get_from_manager") @mock.patch("rally.task.utils.wait_for_status") def test_create_share_and_access_from_vm_wait_timeout( self, params, mock_rally_task_utils_wait_for_status, mock_rally_task_utils_get_from_manager): scenario, fake_share = self.create_env( shares.CreateShareAndAccessFromVM(self.context)) mock_rally_task_utils_wait_for_status.side_effect = \ exceptions.TimeoutException( resource_type="foo_resource", resource_name="foo_name", resource_id="foo_id", desired_status="foo_desired_status", resource_status="foo_resource_status", timeout=2) self.assertRaises(exceptions.TimeoutException, scenario.run, "foo_flavor", "foo_image", "foo_interpreter", "foo_script", "foo_username") scenario._delete_server_with_fip.assert_called_once_with( "foo_server", self.ip, force_delete=False) self.assertFalse(scenario.add_output.called) scenario._delete_share.assert_called_once_with(fake_share) @ddt.data( {"output": (0, "", ""), "expected": [{"complete": {"chart_plugin": "TextArea", "data": [""], "title": "Script StdOut"}}]}, {"output": (1, "x y z", "error message"), "raises": exceptions.ScriptError}, {"output": (0, "[1, 2, 3, 4]", ""), "expected": []} ) @ddt.unpack def test_create_share_and_access_from_vm_add_output(self, output, expected=None, raises=None): scenario, fake_share = self.create_env( shares.CreateShareAndAccessFromVM(self.context)) scenario._run_command.return_value = output kwargs = {"flavor": "foo_flavor", "image": "foo_image", "username": "foo_username", 
"password": "foo_password", "use_floating_ip": "use_fip", "floating_network": "ext_network", "force_delete": "foo_force"} if raises: self.assertRaises(raises, scenario.run, **kwargs) self.assertFalse(scenario.add_output.called) else: scenario.run(**kwargs) calls = [mock.call(**kw) for kw in expected] scenario.add_output.assert_has_calls(calls, any_order=True) scenario._create_share.assert_called_once_with( share_proto="nfs", size=1) scenario._delete_share.assert_called_once_with(fake_share) scenario._allow_access_share.assert_called_once_with( fake_share, "ip", "foo_ip", "rw") scenario._export_location.assert_called_once_with(fake_share) scenario._boot_server_with_fip.assert_called_once_with( "foo_image", "foo_flavor", use_floating_ip="use_fip", floating_network="ext_network", key_name="keypair_name", userdata="#cloud-config\npackages:\n - nfs-common") scenario._delete_server_with_fip.assert_called_once_with( "foo_server", {"id": "foo_id", "ip": "foo_ip", "is_floating": True}, force_delete="foo_force") @ddt.data( {}, {"detailed": True}, {"detailed": False}, {"search_opts": None}, {"search_opts": {}}, {"search_opts": {"foo": "bar"}}, {"detailed": True, "search_opts": None}, {"detailed": False, "search_opts": None}, {"detailed": True, "search_opts": {"foo": "bar"}}, {"detailed": False, "search_opts": {"quuz": "foo"}}, ) @ddt.unpack def test_list_shares(self, detailed=True, search_opts=None): scenario = shares.ListShares(self.context) scenario._list_shares = mock.MagicMock() scenario.run(detailed=detailed, search_opts=search_opts) scenario._list_shares.assert_called_once_with( detailed=detailed, search_opts=search_opts) @ddt.data( {"params": {"share_proto": "nfs"}, "new_size": 4}, { "params": { "share_proto": "cifs", "size": 4, "snapshot_id": "snapshot_foo", "description": "foo_description", "metadata": {"foo_metadata": "foo"}, "share_network": "foo_network", "share_type": "foo_type", "is_public": True, "availability_zone": "foo_avz", "share_group_id": "foo_group_id" 
}, "new_size": 8 } ) @ddt.unpack def test_create_and_extend_shares(self, params, new_size): size = params.get("size", 1) share_group_id = params.get("share_group_id", None) snapshot_id = params.get("snapshot_id", None) description = params.get("description", None) metadata = params.get("metadata", None) share_network = params.get("share_network", None) share_type = params.get("share_type", None) is_public = params.get("is_public", False) availability_zone = params.get("availability_zone", None) fake_share = mock.MagicMock() scenario = shares.CreateAndExtendShare(self.context) scenario._create_share = mock.MagicMock(return_value=fake_share) scenario._extend_share = mock.MagicMock() scenario.run(new_size=new_size, **params) scenario._create_share.assert_called_with( share_proto=params["share_proto"], size=size, snapshot_id=snapshot_id, description=description, metadata=metadata, share_network=share_network, share_type=share_type, is_public=is_public, availability_zone=availability_zone, share_group_id=share_group_id ) scenario._extend_share.assert_called_with(fake_share, new_size) @ddt.data( {"params": {"share_proto": "nfs"}, "new_size": 4}, { "params": { "share_proto": "cifs", "size": 4, "snapshot_id": "snapshot_foo", "description": "foo_description", "metadata": {"foo_metadata": "foo"}, "share_network": "foo_network", "share_type": "foo_type", "is_public": True, "availability_zone": "foo_avz", "share_group_id": "foo_group_id" }, "new_size": 8 } ) @ddt.unpack def test_create_and_shrink_shares(self, params, new_size): size = params.get("size", 2) share_group_id = params.get("share_group_id", None) snapshot_id = params.get("snapshot_id", None) description = params.get("description", None) metadata = params.get("metadata", None) share_network = params.get("share_network", None) share_type = params.get("share_type", None) is_public = params.get("is_public", False) availability_zone = params.get("availability_zone", None) fake_share = mock.MagicMock() scenario = 
shares.CreateAndShrinkShare(self.context) scenario._create_share = mock.MagicMock(return_value=fake_share) scenario._shrink_share = mock.MagicMock() scenario.run(new_size=new_size, **params) scenario._create_share.assert_called_with( share_proto=params["share_proto"], size=size, snapshot_id=snapshot_id, description=description, metadata=metadata, share_network=share_network, share_type=share_type, is_public=is_public, availability_zone=availability_zone, share_group_id=share_group_id ) scenario._shrink_share.assert_called_with(fake_share, new_size) @ddt.data( { "share_proto": "nfs", "size": 3, "access": "127.0.0.1", "access_type": "ip" }, { "access": "1.2.3.4", "access_type": "ip", "access_level": "ro", "share_proto": "cifs", "size": 4, "snapshot_id": "snapshot_foo", "description": "foo_description", "metadata": {"foo_metadata": "foo"}, "share_network": "foo_network", "share_type": "foo_type", "is_public": True, "availability_zone": "foo_avz", "share_group_id": "foo_group_id" } ) def test_create_share_and_allow_and_deny_access(self, params): access = params["access"] access_type = params["access_type"] access_level = params.get("access_level", "rw") size = params.get("size", 1) share_group_id = params.get("share_group_id", None) snapshot_id = params.get("snapshot_id", None) description = params.get("description", None) metadata = params.get("metadata", None) share_network = params.get("share_network", None) share_type = params.get("share_type", None) is_public = params.get("is_public", False) availability_zone = params.get("availability_zone", None) fake_share = mock.MagicMock() fake_access = {"id": "foo"} scenario = shares.CreateShareThenAllowAndDenyAccess(self.context) scenario._create_share = mock.MagicMock(return_value=fake_share) scenario._allow_access_share = mock.MagicMock(return_value=fake_access) scenario._deny_access_share = mock.MagicMock() scenario.run(**params) scenario._create_share.assert_called_with( share_proto=params["share_proto"], size=size, 
snapshot_id=snapshot_id, description=description, metadata=metadata, share_network=share_network, share_type=share_type, is_public=is_public, availability_zone=availability_zone, share_group_id=share_group_id ) scenario._allow_access_share.assert_called_with( fake_share, access_type, access, access_level) scenario._deny_access_share.assert_called_with( fake_share, fake_access["id"]) @ddt.data( {}, {"description": "foo_description"}, {"neutron_net_id": "foo_neutron_net_id"}, {"neutron_subnet_id": "foo_neutron_subnet_id"}, {"nova_net_id": "foo_nova_net_id"}, {"description": "foo_description", "neutron_net_id": "foo_neutron_net_id", "neutron_subnet_id": "foo_neutron_subnet_id", "nova_net_id": "foo_nova_net_id"}, ) def test_create_share_network_and_delete(self, params): fake_sn = mock.MagicMock() scenario = shares.CreateShareNetworkAndDelete(self.context) scenario._create_share_network = mock.MagicMock(return_value=fake_sn) scenario._delete_share_network = mock.MagicMock() expected_params = { "description": None, "neutron_net_id": None, "neutron_subnet_id": None, "nova_net_id": None, } expected_params.update(params) scenario.run(**params) scenario._create_share_network.assert_called_once_with( **expected_params) scenario._delete_share_network.assert_called_once_with(fake_sn) @ddt.data( {}, {"description": "foo_description"}, {"neutron_net_id": "foo_neutron_net_id"}, {"neutron_subnet_id": "foo_neutron_subnet_id"}, {"nova_net_id": "foo_nova_net_id"}, {"description": "foo_description", "neutron_net_id": "foo_neutron_net_id", "neutron_subnet_id": "foo_neutron_subnet_id", "nova_net_id": "foo_nova_net_id"}, ) def test_create_share_network_and_list(self, params): scenario = shares.CreateShareNetworkAndList(self.context) fake_network = mock.Mock() scenario._create_share_network = mock.Mock( return_value=fake_network) scenario._list_share_networks = mock.Mock( return_value=[fake_network, mock.Mock(), mock.Mock()]) expected_create_params = { "description": 
params.get("description"), "neutron_net_id": params.get("neutron_net_id"), "neutron_subnet_id": params.get("neutron_subnet_id"), "nova_net_id": params.get("nova_net_id"), } expected_list_params = { "detailed": params.get("detailed", True), "search_opts": params.get("search_opts"), } expected_create_params.update(params) scenario.run(**params) scenario._create_share_network.assert_called_once_with( **expected_create_params) scenario._list_share_networks.assert_called_once_with( **expected_list_params) @ddt.data( {}, {"search_opts": None}, {"search_opts": {}}, {"search_opts": {"foo": "bar"}}, ) def test_list_share_servers(self, search_opts): scenario = shares.ListShareServers(self.context) scenario.context = {"admin": {"credential": "fake_credential"}} scenario._list_share_servers = mock.MagicMock() scenario.run(search_opts=search_opts) scenario._list_share_servers.assert_called_once_with( search_opts=search_opts) @ddt.data( {"security_service_type": "fake_type"}, {"security_service_type": "fake_type", "dns_ip": "fake_dns_ip", "server": "fake_server", "domain": "fake_domain", "user": "fake_user", "password": "fake_password", "description": "fake_description"}, ) def test_create_security_service_and_delete(self, params): fake_ss = mock.MagicMock() scenario = shares.CreateSecurityServiceAndDelete(self.context) scenario._create_security_service = mock.MagicMock( return_value=fake_ss) scenario._delete_security_service = mock.MagicMock() expected_params = { "security_service_type": params.get("security_service_type"), "dns_ip": params.get("dns_ip"), "server": params.get("server"), "domain": params.get("domain"), "user": params.get("user"), "password": params.get("password"), "description": params.get("description"), } scenario.run(**params) scenario._create_security_service.assert_called_once_with( **expected_params) scenario._delete_security_service.assert_called_once_with(fake_ss) @ddt.data("ldap", "kerberos", "active_directory") def 
test_attach_security_service_to_share_network(self, security_service_type): scenario = shares.AttachSecurityServiceToShareNetwork(self.context) scenario._create_share_network = mock.MagicMock() scenario._create_security_service = mock.MagicMock() scenario._add_security_service_to_share_network = mock.MagicMock() scenario.run(security_service_type=security_service_type) scenario._create_share_network.assert_called_once_with() scenario._create_security_service.assert_called_once_with( security_service_type=security_service_type) scenario._add_security_service_to_share_network.assert_has_calls([ mock.call(scenario._create_share_network.return_value, scenario._create_security_service.return_value)]) @ddt.data( {"share_proto": "nfs", "size": 3, "detailed": True}, {"share_proto": "cifs", "size": 4, "detailed": False, "share_network": "foo", "share_type": "bar"}, ) def test_create_and_list_share(self, params): scenario = shares.CreateAndListShare() scenario._create_share = mock.MagicMock() scenario.sleep_between = mock.MagicMock() scenario._list_shares = mock.MagicMock() scenario.run(min_sleep=3, max_sleep=4, **params) detailed = params.pop("detailed") scenario._create_share.assert_called_once_with(**params) scenario.sleep_between.assert_called_once_with(3, 4) scenario._list_shares.assert_called_once_with(detailed=detailed) @ddt.data( ({}, 0, 0), ({}, 1, 1), ({}, 2, 2), ({}, 3, 0), ({"sets": 5, "set_size": 8, "delete_size": 10}, 1, 1), ) @ddt.unpack def test_set_and_delete_metadata(self, params, iteration, share_number): scenario = shares.SetAndDeleteMetadata() share_list = [{"id": "fake_share_%s_id" % d} for d in range(3)] scenario.context = {"tenant": {"shares": share_list}} scenario.context["iteration"] = iteration scenario._set_metadata = mock.MagicMock() scenario._delete_metadata = mock.MagicMock() expected_set_params = { "share": share_list[share_number], "sets": params.get("sets", 10), "set_size": params.get("set_size", 3), "key_min_length": 
params.get("key_min_length", 1), "key_max_length": params.get("key_max_length", 256), "value_min_length": params.get("value_min_length", 1), "value_max_length": params.get("value_max_length", 1024), } scenario.run(**params) scenario._set_metadata.assert_called_once_with(**expected_set_params) scenario._delete_metadata.assert_called_once_with( share=share_list[share_number], keys=scenario._set_metadata.return_value, delete_size=params.get("delete_size", 3), )
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,844
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/cleanup/manager.py
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Discovery and deletion of OpenStack resources left behind by Rally tasks.

The public entry point is :func:`cleanup`, which finds the matching
``base.ResourceManager`` subclasses and hands each of them to a
:class:`SeekAndDestroy` instance for actual deletion.
"""

import time

from rally.common import broker
from rally.common import logging
from rally.common.plugin import discover
from rally.common.plugin import plugin
from rally.common import utils as rutils

from rally_openstack.task.cleanup import base


LOG = logging.getLogger(__name__)


class SeekAndDestroy(object):
    """Finds and deletes, with retries, all resources of one manager type."""

    def __init__(self, manager_cls, admin, users, resource_classes=None,
                 task_id=None):
        """Resource deletion class.

        This class contains method exterminate() that finds and deletes
        all resources created by Rally.

        :param manager_cls: subclass of base.ResourceManager
        :param admin: admin credential like in context["admin"]
        :param users: users credentials like in context["users"]
        :param resource_classes: Resource classes to match resource names
                                 against
        :param task_id: The UUID of task to match resource names against
        """
        self.manager_cls = manager_cls
        self.admin = admin
        self.users = users or []
        # Fall back to matching any Rally-generated random name.
        self.resource_classes = resource_classes or [
            rutils.RandomNameGeneratorMixin]
        self.task_id = task_id

    def _get_cached_client(self, user):
        """Simplifies initialization and caching OpenStack clients.

        :param user: credential dict (or None for "no client needed")
        :returns: an OpenStack clients object, or None when user is falsy
        """
        if not user:
            return None
        # NOTE(astudenov): Credential now supports caching by default
        return user["credential"].clients()

    def _delete_single_resource(self, resource):
        """Safe resource deletion with retries and timeouts.

        Send request to delete resource, in case of failures repeat it few
        times. After that pull status of resource until it's deleted.

        Writes in LOG warning with UUID of resource that wasn't deleted

        :param resource: instance of resource manager initiated with resource
                         that should be deleted.
        """
        msg_kw = {
            "uuid": resource.id(),
            "name": resource.name() or "",
            "service": resource._service,
            "resource": resource._resource
        }

        LOG.debug(
            "Deleting %(service)s.%(resource)s object %(name)s (%(uuid)s)"
            % msg_kw)

        try:
            # Issue the delete request itself, retrying transient failures.
            rutils.retry(resource._max_attempts, resource.delete)
        except Exception as e:
            msg = ("Resource deletion failed, max retries exceeded for "
                   "%(service)s.%(resource)s: %(uuid)s.") % msg_kw

            if logging.is_debug():
                LOG.exception(msg)
            else:
                LOG.warning("%(msg)s Reason: %(e)s" % {"msg": msg, "e": e})
        else:
            # Delete was accepted; now poll until the resource is really gone
            # or the per-resource timeout expires.
            started = time.time()
            failures_count = 0
            while time.time() - started < resource._timeout:
                try:
                    if resource.is_deleted():
                        return
                except Exception:
                    LOG.exception(
                        "Seems like %s.%s.is_deleted(self) method is broken "
                        "It shouldn't raise any exceptions."
                        % (resource.__module__, type(resource).__name__))

                    # NOTE(boris-42): Avoid LOG spamming in case of bad
                    #                 is_deleted() method
                    failures_count += 1
                    if failures_count > resource._max_attempts:
                        break

                finally:
                    # Pause between polls (also runs after break/return).
                    rutils.interruptable_sleep(resource._interval)

            LOG.warning("Resource deletion failed, timeout occurred for "
                        "%(service)s.%(resource)s: %(uuid)s." % msg_kw)

    def _publisher(self, queue):
        """Publisher for deletion jobs.

        This method iterates over all users, lists all resources
        (using manager_cls) and puts jobs for deletion.

        Every deletion job contains tuple with two values: user and resource
        uuid that should be deleted.

        In case of tenant based resource, uuids are fetched only from one user
        per tenant.
        """
        def _publish(admin, user, manager):
            # List resources with retries; a broken list() must not kill the
            # whole cleanup run.
            try:
                for raw_resource in rutils.retry(3, manager.list):
                    queue.append((admin, user, raw_resource))
            except Exception:
                LOG.exception(
                    "Seems like %s.%s.list(self) method is broken. "
                    "It shouldn't raise any exceptions."
                    % (manager.__module__, type(manager).__name__))

        if self.admin and (not self.users
                           or self.manager_cls._perform_for_admin_only):
            # Admin-only managers list everything through the admin client.
            manager = self.manager_cls(
                admin=self._get_cached_client(self.admin))
            _publish(self.admin, None, manager)

        else:
            # For tenant-scoped resources, only the first user of each tenant
            # lists resources to avoid duplicate deletion jobs.
            visited_tenants = set()
            admin_client = self._get_cached_client(self.admin)
            for user in self.users:
                if (self.manager_cls._tenant_resource
                        and user["tenant_id"] in visited_tenants):
                    continue

                visited_tenants.add(user["tenant_id"])
                manager = self.manager_cls(
                    admin=admin_client,
                    user=self._get_cached_client(user),
                    tenant_uuid=user["tenant_id"])
                _publish(self.admin, user, manager)

    def _consumer(self, cache, args):
        """Method that consumes single deletion job."""
        admin, user, raw_resource = args

        manager = self.manager_cls(
            resource=raw_resource,
            admin=self._get_cached_client(admin),
            user=self._get_cached_client(user),
            tenant_uuid=user and user["tenant_id"])

        # Delete only resources that Rally created: either nameless resources
        # (NoName) or names matching our generators / task id.
        if (isinstance(manager.name(), base.NoName)
                or rutils.name_matches_object(
                    manager.name(), *self.resource_classes,
                    task_id=self.task_id, exact=False)):
            self._delete_single_resource(manager)

    def exterminate(self):
        """Delete all resources for passed users, admin and resource_mgr."""
        # broker.run fans jobs from _publisher out to _threads consumers.
        broker.run(self._publisher, self._consumer,
                   consumers_count=self.manager_cls._threads)


def list_resource_names(admin_required=None):
    """List all resource managers names.

    Returns all service names and all combination of service.resource names.

    :param admin_required: None -> returns all ResourceManagers
                           True -> returns only admin ResourceManagers
                           False -> returns only non admin ResourceManagers
    """
    res_mgrs = discover.itersubclasses(base.ResourceManager)
    if admin_required is not None:
        res_mgrs = filter(lambda cls: cls._admin_required == admin_required,
                          res_mgrs)

    names = set()
    for cls in res_mgrs:
        names.add(cls._service)
        names.add("%s.%s" % (cls._service, cls._resource))

    return names


def find_resource_managers(names=None, admin_required=None):
    """Returns resource managers.

    :param names: List of names in format <service> or <service>.<resource>
                  that is used for filtering resource manager classes
    :param admin_required: None -> returns all ResourceManagers
                           True -> returns only admin ResourceManagers
                           False -> returns only non admin ResourceManagers
    """
    names = set(names or [])

    resource_managers = []
    for manager in discover.itersubclasses(base.ResourceManager):
        if admin_required is not None:
            if admin_required != manager._admin_required:
                continue

        # A manager matches either by bare service name or by the full
        # "<service>.<resource>" form.
        if (manager._service in names
                or "%s.%s" % (manager._service, manager._resource) in names):
            resource_managers.append(manager)

    # Cleanup order matters (e.g. dependent resources first).
    resource_managers.sort(key=lambda x: x._order)

    found_names = set()
    for mgr in resource_managers:
        found_names.add(mgr._service)
        found_names.add("%s.%s" % (mgr._service, mgr._resource))

    missing = names - found_names
    if missing:
        LOG.warning("Missing resource managers: %s" % ", ".join(missing))

    return resource_managers


def cleanup(names=None, admin_required=None, admin=None, users=None,
            superclass=plugin.Plugin, task_id=None):
    """Generic cleaner.

    This method goes through all plugins. Filter those and left
    only plugins with _service from services or _resource from resources.

    Then goes through all passed users and using cleaners cleans
    every related resources.

    :param names: Use only resource managers that have names in this list.
                  There are in as _service or
                  (%s.%s % (_service, _resource)) from

    :param admin_required: If None -> return all plugins
                           If True -> return only admin plugins
                           If False -> return only non admin plugins
    :param admin: rally.deployment.credential.Credential that corresponds to
                  OpenStack admin.
    :param users: List of OpenStack users that was used during testing.
                  Every user has next structure:
                  {
                    "id": <uuid1>,
                    "tenant_id": <uuid2>,
                    "credential": <rally.deployment.credential.Credential>
                  }
    :param superclass: The plugin superclass to perform cleanup
                       for. E.g., this could be
                       ``rally.task.scenario.Scenario`` to cleanup all
                       Scenario resources.
    :param task_id: The UUID of task
    """
    # Resource names are matched against every random-name-generating
    # subclass of `superclass` (or `superclass` itself when it is one).
    resource_classes = [cls for cls in discover.itersubclasses(superclass)
                        if issubclass(cls, rutils.RandomNameGeneratorMixin)]
    if not resource_classes and issubclass(superclass,
                                           rutils.RandomNameGeneratorMixin):
        resource_classes.append(superclass)
    for manager in find_resource_managers(names, admin_required):
        LOG.debug("Cleaning up %(service)s %(resource)s objects"
                  % {"service": manager._service,
                     "resource": manager._resource})
        SeekAndDestroy(manager, admin, users,
                       resource_classes=resource_classes,
                       task_id=task_id).exterminate()
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,845
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/ironic/test_utils.py
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.ironic import utils
from tests.unit import test


IRONIC_UTILS = "rally_openstack.task.scenarios.ironic.utils"


class IronicScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the IronicScenario atomic actions."""

    @mock.patch("%s.utils.wait_for_status" % IRONIC_UTILS)
    def test__create_node(self, mock_wait_for_status):
        ironic = self.admin_clients("ironic")
        ironic.node.create.return_value = "fake_node"
        scenario = utils.IronicScenario(self.context)
        scenario.generate_random_name = mock.Mock()

        scenario._create_node(driver="fake", properties="fake_prop",
                              fake_param="foo")

        # All kwargs must be forwarded to the client, with a generated name.
        ironic.node.create.assert_called_once_with(
            driver="fake", properties="fake_prop", fake_param="foo",
            name=scenario.generate_random_name.return_value)
        self.assertTrue(mock_wait_for_status.called)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "ironic.create_node")

    @mock.patch("%s.utils.wait_for_status" % IRONIC_UTILS)
    def test__delete_node(self, mock_wait_for_status):
        ironic = self.admin_clients("ironic")
        ironic.node.delete = mock.Mock()
        scenario = utils.IronicScenario(self.context)

        scenario._delete_node(mock.Mock(uuid="fake_id"))

        # Deletion is requested by node UUID and then awaited.
        self.assertTrue(mock_wait_for_status.called)
        ironic.node.delete.assert_called_once_with("fake_id")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "ironic.delete_node")

    def test__list_nodes(self):
        ironic = self.admin_clients("ironic")
        ironic.node.list.return_value = ["fake"]
        scenario = utils.IronicScenario(self.context)
        query_kwargs = {
            "sort_dir": "foo1",
            "associated": "foo2",
            "detail": True,
            "maintenance": "foo5"
        }

        listed_nodes = scenario._list_nodes(**query_kwargs)

        # The client result is returned as-is and all filters pass through.
        self.assertEqual(["fake"], listed_nodes)
        ironic.node.list.assert_called_once_with(**query_kwargs)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "ironic.list_nodes")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,846
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/magnum/ca_certs.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from rally.common import utils as rutils
from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.task import context
from rally_openstack.task.scenarios.magnum import utils as magnum_utils


@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="ca_certs", platform="openstack", order=490)
class CaCertGenerator(context.OpenStackContext):
    """Creates ca certs.

    For every tenant with a non-TLS-disabled Magnum cluster this context
    generates a client key + CSR, has Magnum sign the CSR, fetches the
    cluster CA certificate and stores all three as files in the configured
    directory (current directory by default).
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "directory": {
                "type": "string",
            }
        },
        "additionalProperties": False
    }

    def _generate_csr_and_key(self):
        """Return a dict with a new PEM-encoded csr and private key.

        :returns: dict with "csr" and "key" entries, both decoded to ``str``
                  so they can be written to text-mode files and sent to the
                  Magnum API.
        """
        from cryptography.hazmat import backends
        from cryptography.hazmat.primitives.asymmetric import rsa
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives import serialization
        from cryptography import x509
        from cryptography.x509.oid import NameOID

        key = rsa.generate_private_key(
            public_exponent=65537,
            key_size=2048,
            backend=backends.default_backend())

        csr = x509.CertificateSigningRequestBuilder().subject_name(
            x509.Name([
                x509.NameAttribute(NameOID.COMMON_NAME, u"admin"),
                x509.NameAttribute(NameOID.ORGANIZATION_NAME,
                                   u"system:masters")
            ])).sign(key, hashes.SHA256(), backends.default_backend())

        # BUGFIX: public_bytes()/private_bytes() return ``bytes``; the
        # previous code passed them straight to files opened in text mode,
        # which raises TypeError on Python 3. Decode the PEM blobs once here.
        result = {
            "csr": csr.public_bytes(
                encoding=serialization.Encoding.PEM).decode("utf-8"),
            "key": key.private_bytes(
                encoding=serialization.Encoding.PEM,
                format=serialization.PrivateFormat.TraditionalOpenSSL,
                encryption_algorithm=serialization.NoEncryption()
            ).decode("utf-8"),
        }
        return result

    def setup(self):
        """Generate and store TLS artifacts for every tenant's cluster."""
        for user, tenant_id in self._iterate_per_tenants():
            magnum_scenario = magnum_utils.MagnumScenario({
                "user": user,
                "task": self.context["task"],
                "config": {"api_versions": self.context["config"].get(
                    "api_versions", [])}
            })

            # get the cluster and cluster_template
            cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"])
            cluster = magnum_scenario._get_cluster(cluster_uuid)
            cluster_template = magnum_scenario._get_cluster_template(
                cluster.cluster_template_id)

            if not cluster_template.tls_disabled:
                tls = self._generate_csr_and_key()
                # "cert_dir" (not "dir") to avoid shadowing the builtin.
                cert_dir = self.config.get("directory") or ""
                self.context["ca_certs_directory"] = cert_dir

                fname = os.path.join(cert_dir, cluster_uuid + ".key")
                with open(fname, "w") as key_file:
                    key_file.write(tls["key"])

                # get CA certificate for this cluster
                ca_cert = magnum_scenario._get_ca_certificate(cluster_uuid)
                fname = os.path.join(cert_dir, cluster_uuid + "_ca.crt")
                with open(fname, "w") as ca_cert_file:
                    ca_cert_file.write(ca_cert.pem)

                # send csr to Magnum to have it signed
                csr_req = {"cluster_uuid": cluster_uuid,
                           "csr": tls["csr"]}
                cert = magnum_scenario._create_ca_certificate(csr_req)
                fname = os.path.join(cert_dir, cluster_uuid + ".crt")
                with open(fname, "w") as cert_file:
                    cert_file.write(cert.pem)

    def cleanup(self):
        """Remove the key/cert files written by setup() for each tenant."""
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            magnum_scenario = magnum_utils.MagnumScenario({
                "user": user,
                "task": self.context["task"],
                "config": {"api_versions": self.context["config"].get(
                    "api_versions", [])}
            })

            # get the cluster and cluster_template
            cluster_uuid = str(self.context["tenants"][tenant_id]["cluster"])
            cluster = magnum_scenario._get_cluster(cluster_uuid)
            cluster_template = magnum_scenario._get_cluster_template(
                cluster.cluster_template_id)

            if not cluster_template.tls_disabled:
                cert_dir = self.context["ca_certs_directory"]
                # Remove the three files setup() created for this cluster.
                for suffix in (".key", "_ca.crt", ".crt"):
                    os.remove(os.path.join(cert_dir, cluster_uuid + suffix))
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,847
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/nova/test_hypervisors.py
# Copyright 2013 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.scenarios.nova import hypervisors
from tests.unit import test


class NovaHypervisorsTestCase(test.ScenarioTestCase):
    """Unit tests for the nova hypervisor scenarios."""

    def test_list_hypervisors(self):
        scenario = hypervisors.ListHypervisors(self.context)
        scenario._list_hypervisors = mock.Mock()
        scenario.run(detailed=False)
        scenario._list_hypervisors.assert_called_once_with(False)

    def test_list_and_get_hypervisors(self):
        # BUGFIX: the old version used mock.MagicMock(detailed=False), which
        # only set a meaningless "detailed" attribute, and iterated the
        # mock's default return_value (an empty iteration) - so the
        # per-hypervisor assertion never ran. Use an explicit one-element
        # list, like test_list_and_search_hypervisors does.
        fake_hypervisors = [mock.Mock()]
        scenario = hypervisors.ListAndGetHypervisors(self.context)
        scenario._list_hypervisors = mock.MagicMock(
            return_value=fake_hypervisors)
        scenario._get_hypervisor = mock.MagicMock()

        scenario.run(detailed=False)

        scenario._list_hypervisors.assert_called_once_with(False)
        scenario._get_hypervisor.assert_called_once_with(fake_hypervisors[0])

    def test_statistics_hypervisors(self):
        scenario = hypervisors.StatisticsHypervisors(self.context)
        scenario._statistics_hypervisors = mock.Mock()
        scenario.run()
        scenario._statistics_hypervisors.assert_called_once_with()

    def test_list_and_get_uptime_hypervisors(self):
        # Same fix as test_list_and_get_hypervisors: an explicit list makes
        # the uptime assertion actually execute.
        fake_hypervisors = [mock.Mock()]
        scenario = hypervisors.ListAndGetUptimeHypervisors(self.context)
        scenario._list_hypervisors = mock.MagicMock(
            return_value=fake_hypervisors)
        scenario._uptime_hypervisor = mock.MagicMock()

        scenario.run(detailed=False)

        scenario._list_hypervisors.assert_called_once_with(False)
        scenario._uptime_hypervisor.assert_called_once_with(
            fake_hypervisors[0])

    def test_list_and_search_hypervisors(self):
        fake_hypervisors = [mock.Mock(hypervisor_hostname="fake_hostname")]
        scenario = hypervisors.ListAndSearchHypervisors(self.context)
        scenario._list_hypervisors = mock.MagicMock(
            return_value=fake_hypervisors)
        scenario._search_hypervisors = mock.MagicMock()

        scenario.run(detailed=False)

        scenario._list_hypervisors.assert_called_once_with(False)
        scenario._search_hypervisors.assert_called_once_with(
            "fake_hostname")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,848
openstack/rally-openstack
refs/heads/master
/tests/unit/task/contexts/sahara/test_sahara_job_binaries.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally_openstack.task.contexts.sahara import sahara_job_binaries
from rally_openstack.task.scenarios.sahara import utils as sahara_utils
from tests.unit import test

# Module path used to build mock.patch targets for the context under test.
CTX = "rally_openstack.task.contexts.sahara"


class SaharaJobBinariesTestCase(test.ScenarioTestCase):
    """Unit tests for the sahara_job_binaries task context."""

    def setUp(self):
        """Build a fake context with 2 tenants x 2 users and binary config."""
        super(SaharaJobBinariesTestCase, self).setUp()
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()

        self.tenants = {}
        self.users_key = []

        # One tenant dict per tenant id; one user dict per (tenant, user).
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara": {"image": "42"}}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "credential": "credential"})

        # NOTE(review): "user_key" appears unused in this class (only
        # "users_key" above is put into the context) - looks like leftover
        # code; confirm before removing.
        self.user_key = [{"id": i, "tenant_id": j, "credential": "credential"}
                         for j in range(self.tenants_num)
                         for i in range(self.users_per_tenant)]

        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_job_binaries": {
                    "libs": [
                        {
                            "name": "test.jar",
                            "download_url": "http://example.com/test.jar"
                        }
                    ],
                    "mains": [
                        {
                            "name": "test.jar",
                            "download_url": "http://example.com/test.jar"
                        }
                    ]
                },
            },
            "admin": {"credential": mock.MagicMock()},
            "task": mock.MagicMock(),
            "users": self.users_key,
            "tenants": self.tenants
        })

    @mock.patch("%s.sahara_job_binaries.resource_manager.cleanup" % CTX)
    @mock.patch(("%s.sahara_job_binaries.SaharaJobBinaries."
                 "download_and_save_lib") % CTX)
    @mock.patch("%s.sahara_job_binaries.osclients" % CTX)
    def test_setup_and_cleanup(
            self,
            mock_osclients,
            mock_sahara_job_binaries_download_and_save_lib,
            mock_cleanup):
        """setup() must download mains+libs per tenant; cleanup() delegates
        to resource_manager.cleanup with the sahara binary resource names.
        """

        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()

        sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context)

        # Expected: one "mains" and one "libs" download call per tenant.
        download_calls = []

        for i in range(self.tenants_num):
            download_calls.append(mock.call(
                sahara=mock_sahara,
                lib_type="mains",
                name="test.jar",
                download_url="http://example.com/test.jar",
                tenant_id=str(i)))
            download_calls.append(mock.call(
                sahara=mock_sahara,
                lib_type="libs",
                name="test.jar",
                download_url="http://example.com/test.jar",
                tenant_id=str(i)))

        sahara_ctx.setup()

        (mock_sahara_job_binaries_download_and_save_lib.
            assert_has_calls(download_calls))

        sahara_ctx.cleanup()

        mock_cleanup.assert_called_once_with(
            names=["sahara.job_binary_internals", "sahara.job_binaries"],
            users=self.context["users"],
            superclass=sahara_utils.SaharaScenario,
            task_id=self.context["task"]["uuid"])

    @mock.patch("%s.sahara_job_binaries.requests" % CTX)
    @mock.patch("%s.sahara_job_binaries.osclients" % CTX)
    def test_download_and_save_lib(self, mock_osclients, mock_requests):
        """download_and_save_lib() should fetch the lib over HTTP and save it
        under the tenant's sahara context.
        """
        mock_requests.get.content.return_value = "some_binary_content"
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        mock_sahara.job_binary_internals.create.return_value = (
            mock.MagicMock(id=42))

        sahara_ctx = sahara_job_binaries.SaharaJobBinaries(self.context)

        sahara_ctx.context["tenants"]["0"]["sahara"] = {"mains": []}
        sahara_ctx.context["tenants"]["0"]["sahara"]["libs"] = []

        sahara_ctx.download_and_save_lib(sahara=mock_sahara,
                                         lib_type="mains",
                                         name="test_binary",
                                         download_url="http://somewhere",
                                         tenant_id="0")

        sahara_ctx.download_and_save_lib(sahara=mock_sahara,
                                         lib_type="libs",
                                         name="test_binary_2",
                                         download_url="http://somewhere",
                                         tenant_id="0")

        # NOTE(review): asserts a single GET despite two download calls -
        # presumably the implementation caches per-URL downloads; verify
        # against the context implementation.
        mock_requests.get.assert_called_once_with("http://somewhere")
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,849
openstack/rally-openstack
refs/heads/master
/tests/functional/test_cli_task.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import unittest

from tests.functional import utils


class TaskTestCase(unittest.TestCase):
    """Functional tests for per-deployment API version selection."""

    def _run_fake_dummy_task(self, with_existing_users=False):
        """Create a deployment with custom api_info and run a dummy task.

        The two public tests below were near-identical copies; the shared
        flow lives here.

        :param with_existing_users: when True, register the admin credential
            as a pre-created end user so Rally runs with existing users
            instead of creating temporary ones.
        """
        rally = utils.Rally()
        deployment = rally("deployment config", getjson=True)
        if with_existing_users:
            deployment["openstack"]["users"] = [
                deployment["openstack"]["admin"]]
        # Pin the fake service to API version 2 under a custom service type.
        deployment["openstack"]["api_info"] = {
            "fakedummy": {
                "version": "2",
                "service_type": "dummyv2"
            }
        }
        deployment = utils.JsonTempFile(deployment)
        rally("deployment create --name t_create_with_api_info "
              "--filename %s" % deployment.filename)
        self.assertIn("t_create_with_api_info", rally("deployment list"))

        config = utils.TaskConfig({
            "FakeDummy.openstack_api": [
                {
                    "runner": {
                        "type": "constant",
                        "times": 1,
                        "concurrency": 1
                    }
                }
            ]
        })
        # The FakeDummy scenario plugin only exists under tests/functional.
        plugins = "tests/functional/extra/fake_dir/fake_plugin.py"
        rally("--plugin-paths %s task start --task %s" % (
            plugins, config.filename))

    def test_specify_version_by_deployment(self):
        self._run_fake_dummy_task()

    def test_specify_version_by_deployment_with_existing_users(self):
        self._run_fake_dummy_task(with_existing_users=True)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,850
openstack/rally-openstack
refs/heads/master
/tests/unit/common/services/image/test_glance_common.py
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from unittest import mock
import uuid

from glanceclient import exc as glance_exc
from rally import exceptions

from rally_openstack.common import service
from rally_openstack.common.services.image import glance_common
from rally_openstack.common.services.image import image
from tests.unit import test


class FullGlance(service.Service, glance_common.GlanceMixin):
    """Implementation of GlanceMixin with Service base class."""
    pass


class GlanceMixinTestCase(test.TestCase):

    def setUp(self):
        super(GlanceMixinTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.glance = self.clients.glance.return_value
        self.name_generator = mock.MagicMock()
        self.version = "some"
        self.service = FullGlance(
            clients=self.clients, name_generator=self.name_generator)
        self.service.version = self.version

    def test__get_client(self):
        # The mixin must hand back the glance client of the bound clients.
        self.assertEqual(self.glance, self.service._get_client())

    def test_get_image(self):
        image_id = "image_id"
        self.assertEqual(self.glance.images.get.return_value,
                         self.service.get_image(image_id))
        self.glance.images.get.assert_called_once_with(image_id)

    def test_get_image_exception(self):
        # A 404 from glance is translated into Rally's GetResourceNotFound.
        image_id = "image_id"
        self.glance.images.get.side_effect = glance_exc.HTTPNotFound
        self.assertRaises(exceptions.GetResourceNotFound,
                          self.service.get_image, image_id)

    def test_delete_image(self):
        image_id = "image_id"
        self.service.delete_image(image_id)
        self.glance.images.delete.assert_called_once_with(image_id)

    def test_download_image(self):
        image_id = "image_id"
        self.service.download_image(image_id)
        self.glance.images.data.assert_called_once_with(image_id,
                                                        do_checksum=True)


class FullUnifiedGlance(glance_common.UnifiedGlanceMixin, service.Service):
    """Implementation of UnifiedGlanceMixin with Service base class."""
    pass


class UnifiedGlanceMixinTestCase(test.TestCase):

    def setUp(self):
        super(UnifiedGlanceMixinTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.name_generator = mock.MagicMock()
        self.impl = mock.MagicMock()
        self.version = "some"
        self.service = FullUnifiedGlance(
            clients=self.clients, name_generator=self.name_generator)
        self.service._impl = self.impl
        self.service.version = self.version

    def test__unify_image(self):

        class Image(object):
            def __init__(self, visibility=None, is_public=None, status=None):
                self.id = uuid.uuid4()
                self.name = str(uuid.uuid4())
                self.visibility = visibility
                self.is_public = is_public
                self.status = status

        # Case 1: an image exposing the modern ``visibility`` attribute.
        image_obj = Image(visibility="private")
        unified = self.service._unify_image(image_obj)
        self.assertIsInstance(unified, image.UnifiedImage)
        self.assertEqual(image_obj.id, unified.id)
        self.assertEqual(image_obj.visibility, unified.visibility)

        # Case 2: a v1-style image where only ``is_public`` is present;
        # it should be mapped onto the unified ``visibility`` field.
        image_obj = Image(is_public="public")
        del image_obj.visibility
        unified = self.service._unify_image(image_obj)
        self.assertEqual(image_obj.id, unified.id)
        self.assertEqual(image_obj.is_public, unified.visibility)

    def test_get_image(self):
        image_id = "image_id"
        self.service.get_image(image=image_id)
        self.service._impl.get_image.assert_called_once_with(image=image_id)

    def test_delete_image(self):
        image_id = "image_id"
        self.service.delete_image(image_id)
        self.service._impl.delete_image.assert_called_once_with(
            image_id=image_id)

    def test_download_image(self):
        image_id = "image_id"
        self.service.download_image(image_id)
        self.service._impl.download_image.assert_called_once_with(
            image_id, do_checksum=True)
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,851
openstack/rally-openstack
refs/heads/master
/rally_openstack/verification/tempest/consts.py
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from rally.common import utils


class _TempestApiTestSets(utils.ImmutableMixin, utils.EnumMixin):
    """Names of the per-service API test sets shipped with Tempest."""

    BAREMETAL = "baremetal"
    CLUSTERING = "clustering"
    COMPUTE = "compute"
    DATA_PROCESSING = "data_processing"
    DATABASE = "database"
    IDENTITY = "identity"
    IMAGE = "image"
    MESSAGING = "messaging"
    NETWORK = "network"
    OBJECT_STORAGE = "object_storage"
    ORCHESTRATION = "orchestration"
    TELEMETRY = "telemetry"
    VOLUME = "volume"


class _TempestScenarioTestSets(utils.ImmutableMixin, utils.EnumMixin):
    """Name of the Tempest scenario test set."""

    SCENARIO = "scenario"


class _TempestTestSets(utils.ImmutableMixin, utils.EnumMixin):
    """Names of the aggregate Tempest test sets."""

    FULL = "full"
    SMOKE = "smoke"


# Singleton instances exposed as the module's public API.
TempestApiTestSets = _TempestApiTestSets()
TempestScenarioTestSets = _TempestScenarioTestSets()
TempestTestSets = _TempestTestSets()
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,852
openstack/rally-openstack
refs/heads/master
/tests/unit/task/scenarios/designate/test_basic.py
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from unittest import mock

from rally import exceptions

from rally_openstack.task.scenarios.designate import basic
from tests.unit import test

BASE = "rally_openstack.task.scenarios.designate.basic"


class DesignateBasicTestCase(test.ScenarioTestCase):

    @mock.patch("%s.CreateAndListZones._list_zones" % BASE)
    @mock.patch("%s.CreateAndListZones._create_zone" % BASE)
    def test_create_and_list_zones(self, mock__create_zone, mock__list_zones):
        mock__create_zone.return_value = "Area_51"
        mock__list_zones.return_value = ["Area_51", "Siachen", "Bagram"]

        # Happy path: the created zone shows up in the listing.
        basic.CreateAndListZones(self.context).run()
        mock__create_zone.assert_called_once_with()
        mock__list_zones.assert_called_once_with()

        # Failure path: zone creation returned nothing.
        mock__create_zone.return_value = None
        self.assertRaises(exceptions.RallyAssertionError,
                          basic.CreateAndListZones(self.context).run)
        mock__create_zone.assert_called_with()

        # Failure path: the created zone is absent from the listing.
        mock__create_zone.return_value = "HAARP"
        self.assertRaises(exceptions.RallyAssertionError,
                          basic.CreateAndListZones(self.context).run)
        mock__create_zone.assert_called_with()
        mock__list_zones.assert_called_with()

    @mock.patch("%s.CreateAndDeleteZone._delete_zone" % BASE)
    @mock.patch("%s.CreateAndDeleteZone._create_zone" % BASE,
                return_value={"id": "123"})
    def test_create_and_delete_zone(self, mock__create_zone,
                                    mock__delete_zone):
        basic.CreateAndDeleteZone(self.context).run()

        mock__create_zone.assert_called_once_with()
        mock__delete_zone.assert_called_once_with("123")

    @mock.patch("%s.ListZones._list_zones" % BASE)
    def test_list_zones(self, mock_list_zones__list_zones):
        basic.ListZones(self.context).run()
        mock_list_zones__list_zones.assert_called_once_with()

    @mock.patch("%s.ListRecordsets._list_recordsets" % BASE)
    def test_list_recordsets(self, mock__list_recordsets):
        basic.ListRecordsets(self.context).run("123")
        mock__list_recordsets.assert_called_once_with("123")

    @mock.patch("%s.CreateAndDeleteRecordsets._delete_recordset" % BASE)
    @mock.patch("%s.CreateAndDeleteRecordsets._create_recordset" % BASE,
                return_value={"id": "321"})
    def test_create_and_delete_recordsets(self, mock__create_recordset,
                                          mock__delete_recordset):
        zone = {"id": "1234"}
        self.context.update({"tenant": {"zones": [zone]}})
        recordsets_per_zone = 5

        basic.CreateAndDeleteRecordsets(self.context).run(
            recordsets_per_zone=recordsets_per_zone)

        # One create and one delete per recordset, all against the same zone.
        self.assertEqual(mock__create_recordset.mock_calls,
                         [mock.call(zone)] * recordsets_per_zone)
        self.assertEqual(mock__delete_recordset.mock_calls,
                         [mock.call(zone["id"], "321")] * recordsets_per_zone)

    @mock.patch("%s.CreateAndListRecordsets._list_recordsets" % BASE)
    @mock.patch("%s.CreateAndListRecordsets._create_recordset" % BASE)
    def test_create_and_list_recordsets(self, mock__create_recordset,
                                        mock__list_recordsets):
        zone = {"id": "1234"}
        self.context.update({"tenant": {"zones": [zone]}})
        recordsets_per_zone = 5

        basic.CreateAndListRecordsets(self.context).run(
            recordsets_per_zone=recordsets_per_zone)

        # N recordsets are created but the zone is listed only once.
        self.assertEqual(mock__create_recordset.mock_calls,
                         [mock.call(zone)] * recordsets_per_zone)
        mock__list_recordsets.assert_called_once_with(zone["id"])
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
24,853
openstack/rally-openstack
refs/heads/master
/rally_openstack/task/contexts/network/networking_agents.py
# Copyright 2019 Ericsson Software Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.common import logging
from rally.common import validation

from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task import context


LOG = logging.getLogger(__name__)


@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="networking_agents", platform="openstack", order=349)
class NetworkingAgents(context.OpenStackContext):
    """This context supports querying Neutron agents in Rally."""

    # No user-configurable options: the schema only accepts an empty object.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "additionalProperties": False,
    }

    def setup(self):
        # Query the agent list with admin credentials and publish it under
        # the context's own name.
        neutron = osclients.Clients(
            self.context["admin"]["credential"]).neutron()
        # NOTE(bence romsics): If you ever add input parameters to this
        # context beware that here we use the same key in self.context as is
        # used for parameter passing, so we'll overwrite it.
        self.context["networking_agents"] = neutron.list_agents()["agents"]

    def cleanup(self):
        """Neutron agents were not created by Rally, so nothing to do."""
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}