index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
24,654
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/image/test_image.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.common.services.image import glance_v1
from rally_openstack.common.services.image import glance_v2
from rally_openstack.common.services.image import image
from tests.unit import test
@ddt.ddt
class ImageTestCase(test.TestCase):
    """Tests for the unified ``image.Image`` service facade.

    Every test patches ``Image.discover_impl`` so no real glance client
    is created; the tests only verify that the facade forwards its
    arguments verbatim to the versioned implementation (``service._impl``).
    """

    def setUp(self):
        super(ImageTestCase, self).setUp()
        self.clients = mock.MagicMock()

    def get_service_with_fake_impl(self):
        """Return an ``image.Image`` whose implementation is a MagicMock."""
        path = "rally_openstack.common.services.image.image"
        with mock.patch("%s.Image.discover_impl" % path) as mock_discover:
            # discover_impl is expected to yield an (impl, version) pair;
            # the MagicMock stands in for the implementation class.
            mock_discover.return_value = mock.MagicMock(), None
            service = image.Image(self.clients)
        return service

    @ddt.data(("image_name", "container_format", "image_location",
               "disk_format", "visibility", "min_disk", "min_ram"))
    def test_create_image(self, params):
        (image_name, container_format, image_location, disk_format,
         visibility, min_disk, min_ram) = params
        service = self.get_service_with_fake_impl()
        properties = {"fakeprop": "fake"}
        service.create_image(image_name=image_name,
                             container_format=container_format,
                             image_location=image_location,
                             disk_format=disk_format,
                             visibility=visibility,
                             min_disk=min_disk,
                             min_ram=min_ram,
                             properties=properties)
        # The facade must forward every argument unchanged.
        service._impl.create_image.assert_called_once_with(
            image_name=image_name, container_format=container_format,
            image_location=image_location, disk_format=disk_format,
            visibility=visibility, min_disk=min_disk, min_ram=min_ram,
            properties=properties)

    @ddt.data(("image_id", "image_name", "min_disk", "min_ram",
               "remove_props"))
    def test_update_image(self, params):
        (image_id, image_name, min_disk, min_ram, remove_props) = params
        service = self.get_service_with_fake_impl()
        service.update_image(image_id,
                             image_name=image_name,
                             min_disk=min_disk,
                             min_ram=min_ram,
                             remove_props=remove_props)
        service._impl.update_image.assert_called_once_with(
            image_id, image_name=image_name, min_disk=min_disk,
            min_ram=min_ram, remove_props=remove_props)

    @ddt.data("image_id")
    def test_get_image(self, param):
        image_id = param
        service = self.get_service_with_fake_impl()
        service.get_image(image=image_id)
        service._impl.get_image.assert_called_once_with(image_id)

    @ddt.data(("status", "visibility", "owner"))
    def test_list_images(self, params):
        status, visibility, owner = params
        service = self.get_service_with_fake_impl()
        service.list_images(status=status, visibility=visibility, owner=owner)
        service._impl.list_images.assert_called_once_with(
            status=status, visibility=visibility, owner=owner)

    @ddt.data(("image_id", "visibility"))
    def test_set_visibility(self, params):
        image_id, visibility = params
        service = self.get_service_with_fake_impl()
        service.set_visibility(image_id=image_id, visibility=visibility)
        service._impl.set_visibility.assert_called_once_with(
            image_id, visibility=visibility)

    def test_delete_image(self):
        image_id = "image_id"
        service = self.get_service_with_fake_impl()
        service.delete_image(image_id=image_id)
        service._impl.delete_image.assert_called_once_with(image_id)

    def test_download_image(self):
        image_id = "image_id"
        service = self.get_service_with_fake_impl()
        service.download_image(image=image_id, do_checksum=True)
        service._impl.download_image.assert_called_once_with(image_id,
                                                            do_checksum=True)

    def test_is_applicable(self):
        # The unified service is selected by the glance client version.
        clients = mock.Mock()
        clients.glance().version = "1.0"
        self.assertTrue(
            glance_v1.UnifiedGlanceV1Service.is_applicable(clients))

        clients.glance().version = "2.0"
        self.assertTrue(
            glance_v2.UnifiedGlanceV2Service.is_applicable(clients))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,655
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/test_api_versions.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally.common import utils
from rally import exceptions
from rally.task import context
from rally_openstack.task.contexts import api_versions
from tests.unit import test
@ddt.ddt
class OpenStackServicesTestCase(test.TestCase):
    """Tests for the ``api_versions@openstack`` task context."""

    def setUp(self):
        super(OpenStackServicesTestCase, self).setUp()
        # NOTE(review): the patcher is started but never stopped in this
        # block; presumably the base test class stops active patchers on
        # teardown -- confirm against tests.unit.test.TestCase.
        self.mock_clients = mock.patch(
            "rally_openstack.common.osclients.Clients").start()
        osclient_kc = self.mock_clients.return_value.keystone
        self.mock_kc = osclient_kc.return_value
        self.service_catalog = osclient_kc.service_catalog
        # Default: empty catalog and no registered services; individual
        # tests override these return values as needed.
        self.service_catalog.get_endpoints.return_value = []
        self.mock_kc.services.list.return_value = []

    @ddt.data(({"nova": {"service_type": "compute", "version": 2},
                "cinder": {"service_name": "cinderv2", "version": 2},
                "neutron": {"service_type": "network"},
                "glance": {"service_name": "glance"},
                "heat": {"version": 1}}, True),
              ({"nova": {"service_type": "compute",
                         "service_name": "nova"}}, False),
              ({"keystone": {"service_type": "foo"}}, False),
              ({"nova": {"version": "foo"}}, False),
              ({}, False))
    @ddt.unpack
    def test_validate(self, config, valid):
        """Valid configs produce no messages; invalid ones at least one."""
        results = context.Context.validate("api_versions", None, None, config)
        if valid:
            self.assertEqual([], results)
        else:
            self.assertGreater(len(results), 0)

    def test_setup_with_wrong_service_name(self):
        context_obj = {
            "config": {api_versions.OpenStackAPIVersions.get_fullname(): {
                "nova": {"service_name": "service_name"}}},
            "admin": {"credential": mock.MagicMock()},
            "users": [{"credential": mock.MagicMock()}]}
        ctx = api_versions.OpenStackAPIVersions(context_obj)
        self.assertRaises(exceptions.ValidationError, ctx.setup)
        self.service_catalog.get_endpoints.assert_called_once_with()
        self.mock_kc.services.list.assert_called_once_with()

    def test_setup_with_wrong_service_name_and_without_admin(self):
        # Without admin credentials the service list cannot be resolved,
        # so setup fails earlier with ContextSetupFailure.
        context_obj = {
            "config": {api_versions.OpenStackAPIVersions.get_fullname(): {
                "nova": {"service_name": "service_name"}}},
            "users": [{"credential": mock.MagicMock()}]}
        ctx = api_versions.OpenStackAPIVersions(context_obj)
        self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
        self.service_catalog.get_endpoints.assert_called_once_with()
        self.assertFalse(self.mock_kc.services.list.called)

    def test_setup_with_wrong_service_type(self):
        context_obj = {
            "config": {api_versions.OpenStackAPIVersions.get_fullname(): {
                "nova": {"service_type": "service_type"}}},
            "users": [{"credential": mock.MagicMock()}]}
        ctx = api_versions.OpenStackAPIVersions(context_obj)
        self.assertRaises(exceptions.ValidationError, ctx.setup)
        self.service_catalog.get_endpoints.assert_called_once_with()

    def test_setup_with_service_name(self):
        # A matching service name should be resolved to its service type.
        self.mock_kc.services.list.return_value = [
            utils.Struct(type="computev21", name="NovaV21")]
        name = api_versions.OpenStackAPIVersions.get_fullname()
        # NOTE(review): this local shadows the imported ``context`` module.
        context = {
            "config": {name: {"nova": {"service_name": "NovaV21"}}},
            "admin": {"credential": mock.MagicMock()},
            "users": [{"credential": mock.MagicMock()}]}
        ctx = api_versions.OpenStackAPIVersions(context)
        ctx.setup()
        self.service_catalog.get_endpoints.assert_called_once_with()
        self.mock_kc.services.list.assert_called_once_with()
        versions = ctx.context["config"]["api_versions@openstack"]
        self.assertEqual(
            "computev21",
            versions["nova"]["service_type"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,656
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/vm/vmtasks.py
|
# Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import pkgutil
import re
from rally.common import logging
from rally.common import validation
from rally import exceptions
from rally.plugins.common import validators
from rally.task import atomic
from rally.task import types
from rally.task import utils as rally_utils
from rally.utils import sshutils
from rally_openstack.common import consts
from rally_openstack.common.services import heat
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
from rally_openstack.task.scenarios.vm import utils as vm_utils
"""Scenarios that are to be run inside VM instances."""
LOG = logging.getLogger(__name__)
# TODO(andreykurilin): replace by advanced jsonschema(lollipop?!) someday
@validation.configure(name="valid_command", platform="openstack")
class ValidCommandValidator(validators.FileExistsValidator):
    """Validator for the command-specifying dictionary of VM scenarios."""

    def __init__(self, param_name, required=True):
        """Checks that parameter is a proper command-specifying dictionary.

        Ensure that the command dictionary is a proper command-specifying
        dictionary described in 'vmtasks.VMTasks.boot_runcommand_delete'
        docstring.

        :param param_name: Name of parameter to validate
        :param required: Boolean indicating that the command dictionary is
            required
        """
        super(ValidCommandValidator, self).__init__(param_name=param_name)

        self.required = required

    def check_command_dict(self, command):
        """Check command-specifying dict `command'

        :raises ValueError: on error
        """
        if not isinstance(command, dict):
            self.fail("Command must be a dictionary")

        # NOTE(pboldin): Here we check for the values not for presence of the
        # keys due to template-driven configuration generation that can leave
        # keys defined but values empty.
        if command.get("interpreter"):
            script_file = command.get("script_file")
            if script_file:
                # An interpreter takes exactly one script source.
                if "script_inline" in command:
                    self.fail(
                        "Exactly one of script_inline or script_file with "
                        "interpreter is expected: %r" % command)
            # User tries to upload a shell? Make sure it is same as interpreter
            interpreter = command.get("interpreter")
            interpreter = (interpreter[-1]
                           if isinstance(interpreter, (tuple, list))
                           else interpreter)
            if (command.get("local_path")
                    and command.get("remote_path") != interpreter):
                self.fail(
                    "When uploading an interpreter its path should be as well"
                    " specified as the `remote_path' string: %r" % command)
        elif not command.get("remote_path"):
            # No interpreter and no remote command to execute is given
            self.fail(
                "Supplied dict specifies no command to execute, either "
                "interpreter or remote_path is required: %r" % command)

        unexpected_keys = set(command) - {"script_file", "script_inline",
                                          "interpreter", "remote_path",
                                          "local_path", "command_args"}
        if unexpected_keys:
            self.fail(
                "Unexpected command parameters: %s" % ", ".join(
                    unexpected_keys))

    def validate(self, context, config, plugin_cls, plugin_cfg):
        """Validate the scenario's command argument.

        Fails when the command dict is malformed or a referenced local
        file is not readable; silently passes when the parameter is
        absent and not required.
        """
        command = config.get("args", {}).get(self.param_name)
        if command is None and not self.required:
            return

        try:
            self.check_command_dict(command)
        except ValueError as e:
            return self.fail(str(e))

        # Any referenced local file must exist and be readable.
        for key in "script_file", "local_path":
            if command.get(key):
                self._file_access_ok(
                    filename=command[key], mode=os.R_OK,
                    param_name=self.param_name, required=self.required)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", fail_on_404_image=False)
@validation.add("valid_command", param_name="command")
@validation.add("number", param_name="port", minval=1, maxval=65535,
                nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_param_or_context",
                param_name="image", ctx_name="image_command_customizer")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"],
                             "keypair@openstack": {},
                             "allow_ssh@openstack": None},
                    name="VMTasks.boot_runcommand_delete",
                    platform="openstack")
class BootRuncommandDelete(vm_utils.VMScenario, cinder_utils.CinderBasic):

    def run(self, flavor, username, password=None,
            image=None,
            command=None,
            volume_args=None, floating_network=None, port=22,
            use_floating_ip=True, force_delete=False, wait_for_ping=True,
            max_log_length=None, **kwargs):
        """Boot a server, run script specified in command and delete server.

        :param image: glance image name to use for the vm. Optional
            in case of specified "image_command_customizer" context
        :param flavor: VM flavor name
        :param username: ssh username on server, str
        :param password: Password on SSH authentication
        :param command: Command-specifying dictionary that either specifies
            remote command path via `remote_path' (can be uploaded from a
            local file specified by `local_path`), an inline script via
            `script_inline' or a local script file path using `script_file'.
            Both `script_file' and `local_path' are checked to be accessible
            by the `file_exists' validator code.

            The `script_inline' and `script_file' both require an
            `interpreter' value to specify the interpreter script should be
            run with.

            Note that any of `interpreter' and `remote_path' can be an array
            prefixed with environment variables and suffixed with args for
            the `interpreter' command. `remote_path's last component must be
            a path to a command to execute (also upload destination if a
            `local_path' is given). Uploading an interpreter is possible
            but requires that `remote_path' and `interpreter' path do match.

            Examples:

            .. code-block:: python

                # Run a `local_script.pl' file sending it to a remote
                # Perl interpreter
                command = {
                    "script_file": "local_script.pl",
                    "interpreter": "/usr/bin/perl"
                }

                # Run an inline script sending it to a remote interpreter
                command = {
                    "script_inline": "echo 'Hello, World!'",
                    "interpreter": "/bin/sh"
                }

                # Run a remote command
                command = {
                    "remote_path": "/bin/false"
                }

                # Copy a local command and run it
                command = {
                    "remote_path": "/usr/local/bin/fio",
                    "local_path": "/home/foobar/myfiodir/bin/fio"
                }

                # Copy a local command and run it with environment variable
                command = {
                    "remote_path": ["HOME=/root", "/usr/local/bin/fio"],
                    "local_path": "/home/foobar/myfiodir/bin/fio"
                }

                # Run an inline script sending it to a remote interpreter
                command = {
                    "script_inline": "echo \"Hello, ${NAME:-World}\"",
                    "interpreter": ["NAME=Earth", "/bin/sh"]
                }

                # Run an inline script sending it to an uploaded remote
                # interpreter
                command = {
                    "script_inline": "echo \"Hello, ${NAME:-World}\"",
                    "interpreter": ["NAME=Earth", "/tmp/sh"],
                    "remote_path": "/tmp/sh",
                    "local_path": "/home/user/work/cve/sh-1.0/bin/sh"
                }

        :param volume_args: volume args for booting server from volume
        :param floating_network: external network name, for floating ip
        :param port: ssh port for SSH connection
        :param use_floating_ip: bool, floating or fixed IP for SSH connection
        :param force_delete: whether to use force_delete for servers
        :param wait_for_ping: whether to check connectivity on server creation
        :param max_log_length: The number of tail nova console-log lines user
            would like to retrieve
        :param kwargs: extra arguments for booting the server
        """
        if volume_args:
            # Boot from an extra blank volume attached as "vdrally".
            volume = self.cinder.create_volume(volume_args["size"],
                                               imageRef=None)
            kwargs["block_device_mapping"] = {"vdrally": "%s:::1" % volume.id}

        if not image:
            # Image comes from the "image_command_customizer" context.
            image = self.context["tenant"]["custom_image"]["id"]

        server, fip = self._boot_server_with_fip(
            image, flavor, use_floating_ip=use_floating_ip,
            floating_network=floating_network,
            key_name=self.context["user"]["keypair"]["name"],
            **kwargs)
        try:
            if wait_for_ping:
                self._wait_for_ping(fip["ip"])

            code, out, err = self._run_command(
                fip["ip"], port, username, password, command=command)

            text_area_output = ["StdErr: %s" % (err or "(none)"),
                                "StdOut:"]

            if code:
                raise exceptions.ScriptError(
                    "Error running command %(command)s. "
                    "Error %(code)s: %(error)s" % {
                        "command": command, "code": code, "error": err})
            # Let's try to load output data
            try:
                data = json.loads(out)
                # 'echo 42' produces very json-compatible result
                #  - check it here
                if not isinstance(data, dict):
                    raise ValueError
            except ValueError:
                # It's not a JSON, probably it's 'script_inline' result
                data = []
        except (exceptions.TimeoutException,
                exceptions.SSHTimeout):
            # Dump console logs to help debug unreachable/slow guests.
            console_logs = self._get_server_console_output(server,
                                                           max_log_length)
            LOG.debug("VM console logs:\n%s" % console_logs)
            raise
        finally:
            self._delete_server_with_fip(server, fip,
                                         force_delete=force_delete)

        if isinstance(data, dict) and set(data) == {"additive", "complete"}:
            # Output is Rally chart JSON: emit each chart individually.
            for chart_type, charts in data.items():
                for chart in charts:
                    self.add_output(**{chart_type: chart})
        else:
            # Not chart JSON: show raw stdout/stderr in a text area.
            text_area_output.extend(out.split("\n"))
            self.add_output(complete={"title": "Script Output",
                                      "chart_plugin": "TextArea",
                                      "data": text_area_output})
@scenario.configure(context={"cleanup@openstack": ["nova", "heat"],
                             "keypair@openstack": {}, "network@openstack": {}},
                    name="VMTasks.runcommand_heat")
class RuncommandHeat(vm_utils.VMScenario):

    def run(self, workload, template, files, parameters):
        """Run workload on stack deployed by heat.

        Workload can be either file or resource:

        .. code-block:: json

            {"file": "/path/to/file.sh"}
            {"resource": ["package.module", "workload.py"]}

        Also it should contain "username" key.

        Given file will be uploaded to `gate_node` and started. This script
        should print `key` `value` pairs separated by colon. These pairs will
        be presented in results.

        Gate node should be accessible via ssh with keypair `key_name`, so
        heat template should accept parameter `key_name`.

        :param workload: workload to run
        :param template: path to heat template file
        :param files: additional template files
        :param parameters: parameters for heat template
        """
        keypair = self.context["user"]["keypair"]
        parameters["key_name"] = keypair["name"]
        network = self.context["tenant"]["networks"][0]
        parameters["router_id"] = network["router_id"]
        self.stack = heat.main.Stack(self, self.task,
                                     template, files=files,
                                     parameters=parameters)
        self.stack.create()
        # The template must expose the ssh-reachable host as "gate_node".
        for output in self.stack.stack.outputs:
            if output["output_key"] == "gate_node":
                ip = output["output_value"]
                break
        ssh = sshutils.SSH(workload["username"], ip, pkey=keypair["private"])
        ssh.wait()
        script = workload.get("resource")
        if script:
            script = pkgutil.get_data(*script)
        else:
            # FIX: close the workload file instead of leaking the handle
            # (was ``open(workload["file"]).read()``).
            with open(workload["file"]) as f:
                script = f.read()
        ssh.execute("cat > /tmp/.rally-workload", stdin=script)
        ssh.execute("chmod +x /tmp/.rally-workload")
        with atomic.ActionTimer(self, "runcommand_heat.workload"):
            status, out, err = ssh.execute(
                "/tmp/.rally-workload",
                stdin=json.dumps(self.stack.stack.outputs))
        # The workload prints "key: value" lines; anything else is an error.
        rows = []
        for line in out.splitlines():
            row = line.split(":")
            if len(row) != 2:
                raise exceptions.ScriptError("Invalid data '%s'" % line)
            rows.append(row)
        if not rows:
            raise exceptions.ScriptError("No data returned. Original error "
                                         "message is %s" % err)
        self.add_output(
            complete={"title": "Workload summary",
                      "description": "Data generated by workload",
                      "chart_plugin": "Table",
                      "data": {
                          "cols": ["key", "value"],
                          "rows": rows}}
        )
BASH_DD_LOAD_TEST = """
#!/bin/sh
# Load server and output JSON results ready to be processed
# by Rally scenario
for ex in awk top grep free tr df dc dd gzip
do
if ! type ${ex} >/dev/null
then
echo "Executable is required by script but not available\
on a server: ${ex}" >&2
return 1
fi
done
get_used_cpu_percent() {
echo 100\
$(top -b -n 1 | grep -i CPU | head -n 1 | awk '{print $8}' | tr -d %)\
- p | dc
}
get_used_ram_percent() {
local total=$(free | grep Mem: | awk '{print $2}')
local used=$(free | grep -- -/+\\ buffers | awk '{print $3}')
echo ${used} 100 \\* ${total} / p | dc
}
get_used_disk_percent() {
df -P / | grep -v Filesystem | awk '{print $5}' | tr -d %
}
get_seconds() {
(time -p ${1}) 2>&1 | awk '/real/{print $2}'
}
complete_load() {
local script_file=${LOAD_SCRIPT_FILE:-/tmp/load.sh}
local stop_file=${LOAD_STOP_FILE:-/tmp/load.stop}
local processes_num=${LOAD_PROCESSES_COUNT:-20}
local size=${LOAD_SIZE_MB:-5}
cat << EOF > ${script_file}
until test -e ${stop_file}
do dd if=/dev/urandom bs=1M count=${size} 2>/dev/null | gzip >/dev/null ; done
EOF
local sep
local cpu
local ram
local dis
rm -f ${stop_file}
for i in $(seq ${processes_num})
do
i=$((i-1))
sh ${script_file} &
cpu="${cpu}${sep}[${i}, $(get_used_cpu_percent)]"
ram="${ram}${sep}[${i}, $(get_used_ram_percent)]"
dis="${dis}${sep}[${i}, $(get_used_disk_percent)]"
sep=", "
done
> ${stop_file}
cat << EOF
{
"title": "Generate load by spawning processes",
"description": "Each process runs gzip for ${size}M urandom data\
in a loop",
"chart_plugin": "Lines",
"axis_label": "Number of processes",
"label": "Usage, %",
"data": [
["CPU", [${cpu}]],
["Memory", [${ram}]],
["Disk", [${dis}]]]
}
EOF
}
additive_dd() {
local c=${1:-50} # Megabytes
local file=/tmp/dd_test.img
local write=$(get_seconds "dd if=/dev/zero of=${file} bs=1M count=${c}")
local read=$(get_seconds "dd if=${file} of=/dev/null bs=1M count=${c}")
local gzip=$(get_seconds "gzip ${file}")
rm ${file}.gz
cat << EOF
{
"title": "Write, read and gzip file",
"description": "Using file '${file}', size ${c}Mb.",
"chart_plugin": "StackedArea",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
},
{
"title": "Statistics for write/read/gzip",
"chart_plugin": "StatsTable",
"data": [
["write_${c}M", ${write}],
["read_${c}M", ${read}],
["gzip_${c}M", ${gzip}]]
}
EOF
}
cat << EOF
{
"additive": [$(additive_dd)],
"complete": [$(complete_load)]
}
EOF
"""
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("number", param_name="port", minval=1, maxval=65535,
                nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova", "cinder"],
                             "keypair@openstack": {},
                             "allow_ssh@openstack": None},
                    name="VMTasks.dd_load_test",
                    platform="openstack")
class DDLoadTest(BootRuncommandDelete):

    @logging.log_deprecated_args(
        "Use 'interpreter' to specify the interpreter to execute script from.",
        "0.10.0", ["command"], once=True)
    def run(self, flavor, username, password=None,
            image=None, command=None, interpreter="/bin/sh",
            volume_args=None, floating_network=None, port=22,
            use_floating_ip=True, force_delete=False, wait_for_ping=True,
            max_log_length=None, **kwargs):
        """Boot a server from a custom image and performs dd load test.

        .. note:: dd load test is prepared script by Rally team. It checks
            writing and reading metrics from the VM.

        :param image: glance image name to use for the vm. Optional
            in case of specified "image_command_customizer" context
        :param flavor: VM flavor name
        :param username: ssh username on server, str
        :param password: Password on SSH authentication
        :param interpreter: the interpreter to execute script with dd load
            test (defaults to /bin/sh)
        :param command: DEPRECATED. use interpreter instead.
        :param volume_args: volume args for booting server from volume
        :param floating_network: external network name, for floating ip
        :param port: ssh port for SSH connection
        :param use_floating_ip: bool, floating or fixed IP for SSH connection
        :param force_delete: whether to use force_delete for servers
        :param wait_for_ping: whether to check connectivity on server creation
        :param max_log_length: The number of tail nova console-log lines user
            would like to retrieve
        :param kwargs: extra arguments for booting the server
        """
        # The script is fixed; only the interpreter is configurable.
        cmd = {"interpreter": interpreter,
               "script_inline": BASH_DD_LOAD_TEST}
        # Deprecated "command" may still carry an interpreter override.
        if command and "interpreter" in command:
            cmd["interpreter"] = command["interpreter"]
        return super(DDLoadTest, self).run(
            flavor=flavor, username=username, password=password,
            image=image, command=cmd,
            volume_args=volume_args, floating_network=floating_network,
            port=port, use_floating_ip=use_floating_ip,
            force_delete=force_delete,
            wait_for_ping=wait_for_ping, max_log_length=max_log_length,
            **kwargs)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image", fail_on_404_image=False)
@validation.add("number", param_name="port", minval=1, maxval=65535,
                nullable=True, integer_only=True)
@validation.add("external_network_exists", param_name="floating_network")
@validation.add("required_services", services=[consts.Service.DESIGNATE,
                                               consts.Service.NEUTRON,
                                               consts.Service.NOVA])
@validation.add("required_contexts", contexts=["network", "zones"])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_context_config", context_name="zones",
                context_config={"set_zone_in_network": True})
@scenario.configure(context={"cleanup@openstack": ["designate",
                                                   "nova", "neutron"],
                             "keypair@openstack": {},
                             "allow_ssh@openstack": None},
                    name="VMTasks.check_designate_dns_resolving",
                    platform="openstack")
class CheckDesignateDNSResolving(vm_utils.VMScenario):

    def run(self, image, flavor, username, password=None,
            floating_network=None, port=22,
            use_floating_ip=True, force_delete=False, max_log_length=None,
            **kwargs):
        """Try to resolve hostname from VM against existing designate DNS.

        - requires zone context with set_zone_in_network parameter

            > zones:
            >  set_zone_in_network: True

        - designate IP should be in default dns_nameservers list for new
          networks or it can be specified in a network context

            > network:
            >  dns_nameservers:
            >    - 8.8.8.8
            >    - 192.168.210.45

        :param image: glance image name to use for the vm
        :param flavor: VM flavor name
        :param username: ssh username on server
        :param password: Password on SSH authentication
        :param floating_network: external network name, for floating ip
        :param port: ssh port for SSH connection
        :param use_floating_ip: bool, floating or fixed IP for SSH connection
        :param force_delete: whether to use force_delete for servers
        :param max_log_length: The number of tail nova console-log lines user
            would like to retrieve
        :param kwargs: optional args
        """
        zone = self.context["tenant"]["zones"][0]["name"]

        server, fip = self._boot_server_with_fip(
            image, flavor, use_floating_ip=use_floating_ip,
            floating_network=floating_network,
            key_name=self.context["user"]["keypair"]["name"],
            **kwargs)
        # Wait for cloud-init, then resolve the guest's own name in the
        # designate-managed zone; dig output is checked below.
        script = f"cloud-init status -w; resolvectl status; "\
                 f"dig $(hostname).{zone}"
        command = {
            "script_inline": script,
            "interpreter": "/bin/bash"
        }
        try:
            rally_utils.wait_for_status(
                server,
                ready_statuses=["ACTIVE"],
                update_resource=rally_utils.get_from_manager(),
            )

            code, out, err = self._run_command(
                fip["ip"], port, username, password, command=command)
            if code:
                raise exceptions.ScriptError(
                    "Error running command %(command)s. "
                    "Error %(code)s: %(error)s" % {
                        "command": command, "code": code, "error": err})
            else:
                # A dig answer section means the record resolved.
                if not re.findall(".*ANSWER SECTION.*", out, re.MULTILINE):
                    raise exceptions.ScriptError(
                        f"Error running {script}. "
                        f"Error: Missing ANSWER section in the output {out}")
        except (exceptions.TimeoutException,
                exceptions.SSHTimeout):
            console_logs = self._get_server_console_output(server,
                                                           max_log_length)
            LOG.debug("VM console logs:\n%s" % console_logs)
            raise
        finally:
            self._delete_server_with_fip(server, fip,
                                         force_delete=force_delete)

        # Only reached on success: out/err are bound by _run_command above.
        self.add_output(complete={
            "title": "Script StdOut",
            "chart_plugin": "TextArea",
            "data": str(out).split("\n")
        })
        if err:
            self.add_output(complete={
                "title": "Script StdErr",
                "chart_plugin": "TextArea",
                "data": err.split("\n")
            })
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,657
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/barbican/utils.py
|
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common.services.key_manager import barbican
from rally_openstack.task import scenario
class BarbicanBase(scenario.OpenStackScenario):
    """Base class for Barbican scenarios with basic atomic actions."""

    def __init__(self, context=None, admin_context=None, clients=None):
        """Attach Barbican service helpers for available credentials."""
        super(BarbicanBase, self).__init__(context, admin_context, clients)
        # One helper per credential set: admin first, then regular user,
        # each created only when the corresponding clients exist.
        if hasattr(self, "_admin_clients"):
            self.admin_barbican = self._build_service(self._admin_clients)
        if hasattr(self, "_clients"):
            self.barbican = self._build_service(self._clients)

    def _build_service(self, clients):
        # Wrap clients in a BarbicanService sharing this scenario's
        # name generator and atomic-action storage.
        return barbican.BarbicanService(
            clients, name_generator=self.generate_random_name,
            atomic_inst=self.atomic_actions())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,658
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/keystone/basic.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import validation
from rally_openstack.common.services.identity import identity
from rally_openstack.task import scenario
class KeystoneBasic(scenario.OpenStackScenario):
    """Common base for Keystone scenarios.

    Exposes ``admin_keystone`` / ``keystone`` identity service facades,
    each one wired to the scenario's name generator and atomic-action
    registry.
    """

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(KeystoneBasic, self).__init__(context, admin_clients, clients)
        # Create an Identity facade for whichever credential sets the
        # scenario runner provided (admin and/or regular users).
        for clients_attr, facade_attr in (("_admin_clients", "admin_keystone"),
                                          ("_clients", "keystone")):
            if hasattr(self, clients_attr):
                setattr(self, facade_attr, identity.Identity(
                    getattr(self, clients_attr),
                    name_generator=self.generate_random_name,
                    atomic_inst=self.atomic_actions()))
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_user",
                    platform="openstack")
class CreateUser(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_user is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=10, **kwargs):
        """Create a single keystone user; the name is auto-generated.

        :param kwargs: Other optional parameters to create users like
                       "tenant_id", "enabled".
        """
        self.admin_keystone.create_user(**kwargs)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_delete_user",
                    platform="openstack")
class CreateDeleteUser(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_delete_user is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=10, **kwargs):
        """Create a keystone user, then immediately delete it.

        :param kwargs: Other optional parameters to create users like
                       "tenant_id", "enabled".
        """
        created = self.admin_keystone.create_user(**kwargs)
        self.admin_keystone.delete_user(created.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_user_set_enabled_and_delete",
                    platform="openstack")
class CreateUserSetEnabledAndDelete(KeystoneBasic):

    def run(self, enabled=True, **kwargs):
        """Create a keystone user, toggle its 'enabled' flag, delete it.

        :param enabled: Initial state of user 'enabled' flag. The user
                        will be created with 'enabled' set to this
                        value, and then it will be toggled.
        :param kwargs: Other optional parameters to create user.
        """
        subject = self.admin_keystone.create_user(enabled=enabled, **kwargs)
        # Flip the flag relative to its creation-time value.
        self.admin_keystone.update_user(subject.id, enabled=not enabled)
        self.admin_keystone.delete_user(subject.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_tenant",
                    platform="openstack")
class CreateTenant(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_tenant is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=10, **kwargs):
        """Create a single keystone tenant; the name is auto-generated.

        :param kwargs: Other optional parameters
        """
        self.admin_keystone.create_project(**kwargs)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.authenticate_user_and_validate_token",
                    platform="openstack")
class AuthenticateUserAndValidateToken(KeystoneBasic):

    def run(self):
        """Fetch a keystone token and validate it."""
        self.admin_keystone.validate_token(self.admin_keystone.fetch_token())
@validation.add("number", param_name="users_per_tenant", minval=1)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_tenant_with_users",
                    platform="openstack")
class CreateTenantWithUsers(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_tenant_with_users is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, users_per_tenant, name_length=10, **kwargs):
        """Create a keystone tenant plus a batch of users inside it.

        :param users_per_tenant: number of users to create for the tenant
        :param kwargs: Other optional parameters for tenant creation
        """
        project = self.admin_keystone.create_project(**kwargs)
        self.admin_keystone.create_users(project.id,
                                         number_of_users=users_per_tenant)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_list_users",
                    platform="openstack")
class CreateAndListUsers(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_and_list_users is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=10, **kwargs):
        """Create a keystone user, then list every user.

        :param kwargs: Other optional parameters to create users like
                       "tenant_id", "enabled".
        """
        # A caller-supplied "name" would conflict with the generated one.
        kwargs.pop("name", None)
        self.admin_keystone.create_user(**kwargs)
        self.admin_keystone.list_users()
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_list_tenants",
                    platform="openstack")
class CreateAndListTenants(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_and_list_tenants is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=10, **kwargs):
        """Create a keystone tenant, then list every tenant.

        :param kwargs: Other optional parameters
        """
        self.admin_keystone.create_project(**kwargs)
        self.admin_keystone.list_projects()
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.add_and_remove_user_role",
                    platform="openstack")
class AddAndRemoveUserRole(KeystoneBasic):

    def run(self):
        """Create a role, assign it to the context user, then revoke it."""
        project_id = self.context["tenant"]["id"]
        member_id = self.context["user"]["id"]
        new_role = self.admin_keystone.create_role()
        self.admin_keystone.add_role(role_id=new_role.id, user_id=member_id,
                                     project_id=project_id)
        self.admin_keystone.revoke_role(new_role.id, user_id=member_id,
                                        project_id=project_id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_delete_role",
                    platform="openstack")
class CreateAndDeleteRole(KeystoneBasic):

    def run(self):
        """Create a user role, then delete it right away."""
        new_role = self.admin_keystone.create_role()
        self.admin_keystone.delete_role(new_role.id)
@validation.add("required_platform", platform="openstack",
                admin=True, users=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_add_and_list_user_roles",
                    platform="openstack")
class CreateAddAndListUserRoles(KeystoneBasic):

    def run(self):
        """Create a role, assign it, then list roles of the context user."""
        project_id = self.context["tenant"]["id"]
        member_id = self.context["user"]["id"]
        new_role = self.admin_keystone.create_role()
        self.admin_keystone.add_role(user_id=member_id, role_id=new_role.id,
                                     project_id=project_id)
        self.admin_keystone.list_roles(user_id=member_id,
                                       project_id=project_id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.get_entities",
                    platform="openstack")
class GetEntities(KeystoneBasic):

    def run(self, service_name="keystone"):
        """Fetch a tenant, user, role and service one by one via their IDs.

        Ephemeral tenant, user and role objects are created first. The
        service is looked up by name ('keystone' by default, which can
        be overridden for older clouds, e.g. 'Identity Service'); pass
        None to create an ephemeral service instead and fetch that one.

        :param service_name: The name of the service to get by ID; or
                             None, to create an ephemeral service and
                             get it by ID.
        """
        project = self.admin_keystone.create_project()
        member = self.admin_keystone.create_user(project_id=project.id)
        new_role = self.admin_keystone.create_role()
        self.admin_keystone.get_project(project.id)
        self.admin_keystone.get_user(member.id)
        self.admin_keystone.get_role(new_role.id)
        # Either make a throwaway service or resolve an existing one by name.
        svc = (self.admin_keystone.create_service()
               if service_name is None
               else self.admin_keystone.get_service_by_name(service_name))
        self.admin_keystone.get_service(svc.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_delete_service",
                    platform="openstack")
class CreateAndDeleteService(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name' argument to create_and_delete_service will be ignored",
        "0.0.5", ["name"])
    def run(self, name=None, service_type=None, description=None):
        """Create a service, then delete it.

        :param service_type: type of the service
        :param description: description of the service
        """
        svc = self.admin_keystone.create_service(service_type=service_type,
                                                 description=description)
        self.admin_keystone.delete_service(svc.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_update_and_delete_tenant",
                    platform="openstack")
class CreateUpdateAndDeleteTenant(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' argument to create_update_and_delete_tenant is "
        "ignored", "0.1.2", ["name_length"], once=True)
    def run(self, name_length=None, **kwargs):
        """Create a tenant, rename/re-describe it, then delete it.

        :param kwargs: Other optional parameters for tenant creation
        """
        tenant = self.admin_keystone.create_project(**kwargs)
        # Both the name and the description are refreshed on update.
        self.admin_keystone.update_project(
            tenant.id,
            name=self.generate_random_name(),
            description=self.generate_random_name())
        self.admin_keystone.delete_project(tenant.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_user_update_password",
                    platform="openstack")
class CreateUserUpdatePassword(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name_length' and 'password_length' arguments to "
        "create_user_update_password are ignored",
        "0.1.2", ["name_length", "password_length"], once=True)
    def run(self, name_length=None, password_length=None):
        """Create a user, then set a freshly generated password on it."""
        subject = self.admin_keystone.create_user()
        new_password = self.generate_random_name()
        self.admin_keystone.update_user(subject.id, password=new_password)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_list_services",
                    platform="openstack")
class CreateAndListServices(KeystoneBasic):

    @logging.log_deprecated_args(
        "The 'name' argument to create_and_list_services will be ignored",
        "0.0.5", ["name"])
    def run(self, name=None, service_type=None, description=None):
        """Create a service, then list every service.

        :param service_type: type of the service
        :param description: description of the service
        """
        self.admin_keystone.create_service(service_type=service_type,
                                           description=description)
        self.admin_keystone.list_services()
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_list_ec2credentials",
                    platform="openstack")
class CreateAndListEc2Credentials(KeystoneBasic):

    def run(self):
        """Create an ec2-credential for the context user, then list all."""
        user_id = self.context["user"]["id"]
        self.keystone.create_ec2credentials(
            user_id, project_id=self.context["tenant"]["id"])
        self.keystone.list_ec2credentials(user_id)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_delete_ec2credential",
                    platform="openstack")
class CreateAndDeleteEc2Credential(KeystoneBasic):

    def run(self):
        """Create an ec2-credential for the context user, then delete it."""
        user_id = self.context["user"]["id"]
        credential = self.keystone.create_ec2credentials(
            user_id, project_id=self.context["tenant"]["id"])
        # Deletion is keyed on the credential's access key.
        self.keystone.delete_ec2credential(user_id, access=credential.access)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_get_role",
                    platform="openstack")
class CreateAndGetRole(KeystoneBasic):

    def run(self, **kwargs):
        """Create a role, then fetch its detailed information.

        :param kwargs: Optional additional arguments for roles creation
        """
        new_role = self.admin_keystone.create_role(**kwargs)
        self.admin_keystone.get_role(new_role.id)
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_list_roles",
                    platform="openstack")
class CreateAddListRoles(KeystoneBasic):

    def run(self, create_role_kwargs=None, list_role_kwargs=None):
        """Create a role, list all roles and verify the new one shows up.

        :param create_role_kwargs: Optional additional arguments for
                                   roles create
        :param list_role_kwargs: Optional additional arguments for roles list
        """
        create_kwargs = create_role_kwargs or {}
        list_kwargs = list_role_kwargs or {}
        new_role = self.admin_keystone.create_role(**create_kwargs)
        self.assertTrue(new_role, err_msg="Role isn't created")
        available = self.admin_keystone.list_roles(**list_kwargs)
        self.assertIn(
            new_role, available,
            err_msg="Created role is not in the list of all available roles")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["keystone"]},
                    name="KeystoneBasic.create_and_update_user",
                    platform="openstack")
class CreateAndUpdateUser(KeystoneBasic):

    def run(self, create_user_kwargs=None, update_user_kwargs=None):
        """Create user and update the user.

        :param create_user_kwargs: Optional additional arguments for user
                                   creation
        :param update_user_kwargs: Optional additional arguments for user
                                   updation
        """
        create_user_kwargs = create_user_kwargs or {}
        # Bug fix: the None default was previously passed straight to the
        # update call, so omitting update_user_kwargs crashed with
        # "TypeError: argument after ** must be a mapping". Normalize it
        # the same way create_user_kwargs is normalized.
        update_user_kwargs = update_user_kwargs or {}
        user = self.admin_keystone.create_user(**create_user_kwargs)
        self.admin_keystone.update_user(user.id, **update_user_kwargs)
        # Re-read the user through the raw client to verify that every
        # requested attribute actually changed on the server side.
        user_data = self.admin_clients("keystone").users.get(user.id)
        for args in update_user_kwargs:
            msg = ("%s isn't updated" % args)
            self.assertEqual(getattr(user_data, str(args)),
                             update_user_kwargs[args], err_msg=msg)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,659
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/rally_jobs/test_zuul_jobs.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import yaml
import rally_openstack
from tests.unit import test
class RallyJobsTestCase(test.TestCase):
    """Sanity checks for the project's Zuul CI configuration (.zuul.d)."""

    # Repository root: the directory that contains the rally_openstack
    # package, and the Zuul configuration directory inside it.
    root_dir = os.path.dirname(os.path.dirname(rally_openstack.__file__))
    zuul_jobs_path = os.path.join(root_dir, ".zuul.d")

    def setUp(self):
        """Load zuul.yaml and locate its 'project' section."""
        super(RallyJobsTestCase, self).setUp()
        with open(os.path.join(self.zuul_jobs_path, "zuul.yaml")) as f:
            self.zuul_cfg = yaml.safe_load(f)
        self.project_cfg = None
        # The config is a list of one-key mappings; pick the first one
        # that carries a "project" key.
        for item in self.zuul_cfg:
            if "project" in item:
                self.project_cfg = item["project"]
                break
        if self.project_cfg is None:
            self.fail("Cannot detect project section from zuul config.")

    @staticmethod
    def _parse_job(job):
        """Split a job entry into (name, config).

        Zuul job entries are either plain strings (no config) or
        single-key dicts mapping the job name to its configuration.
        """
        if isinstance(job, dict):
            job_name = list(job)[0]
            job_cfg = job[job_name]
            return job_name, job_cfg
        return job, None

    @staticmethod
    def _tox_job_sorter(job_name):
        """Sort key for 'rally-tox-*' jobs.

        All 'pyXY' jobs collapse to the name "py" and are ordered by
        their (major, minor) Python version; other tox jobs sort
        alphabetically with version (0, 0).
        """
        python_maj_version = 0
        python_min_version = 0
        # NOTE(review): maxsplit=3 can produce 4 fields for names with
        # an extra dash after the tox env (e.g. "rally-tox-a-b"), which
        # would break this 3-way unpack -- confirm intended.
        _rally, _tox, job_name = job_name.split("-", 3)
        if job_name.startswith("py"):
            # e.g. "py310" -> major 3, minor 10
            python_maj_version = int(job_name[2])
            python_min_version = int(job_name[3:])
            job_name = "py"
        return job_name, python_maj_version, python_min_version

    def _check_order_of_jobs(self, pipeline):
        """Fail unless the pipeline lists jobs in the canonical order.

        Expected order: tox jobs (alphabetic, pyXY by version), then the
        hard-coded specific jobs, then everything else alphabetically.
        """
        jobs = self.project_cfg[pipeline]["jobs"]
        specific_jobs = ["rally-dsvm-tox-functional",
                         "rally-openstack-docker-build",
                         "rally-task-basic-with-existing-users",
                         "rally-task-simple-job"]
        error_message = (
            f"[{pipeline} pipeline] We are trying to display jobs in a "
            f"specific order to simplify search and reading. Tox jobs should "
            f"go first in alphabetic order. Next several specific jobs are "
            f"expected ({', '.join(specific_jobs)}). "
            f"Next - all other jobs in alphabetic order."
        )
        error_message += "\nPlease place '%s' at the position of '%s'."
        jobs_names = [self._parse_job(job)[0] for job in jobs]
        tox_jobs = sorted(
            (job for job in jobs_names if job.startswith("rally-tox-")),
            key=self._tox_job_sorter
        )
        # NOTE(review): this compares against jobs[i] (raw entries, which
        # may be dicts) rather than jobs_names[i] -- presumably all tox
        # jobs are plain strings; confirm.
        for i, job in enumerate(tox_jobs):
            if job != jobs[i]:
                self.fail(error_message % (job, jobs[i]))
        # `i` keeps advancing across the loops below, walking the single
        # expected ordering of jobs_names from where the tox jobs ended.
        for job in specific_jobs:
            if job not in jobs_names:
                continue
            i += 1
            if job != jobs_names[i]:
                self.fail(error_message % (job, jobs_names[i]))
        i += 1
        # Whatever remains must be in plain alphabetic order.
        other_jobs = sorted(jobs_names[i: len(jobs_names)])
        for j, job in enumerate(other_jobs):
            if job != jobs_names[i + j]:
                self.fail(error_message % (job, jobs_names[i + j]))

    def test_order_of_displaying_jobs(self):
        """Both pipelines must list their jobs in the canonical order."""
        for pipeline in ("check", "gate"):
            self._check_order_of_jobs(pipeline=pipeline)

    # The only job parameters allowed in the gate pipeline, and the ones
    # whose file matchers are validated against the working tree below.
    JOB_FILES_PARAMS = {"files", "irrelevant-files"}

    def test_job_configs(self):
        """Validate per-job config: gate params and file-matcher regexes.

        Collects every 'files'/'irrelevant-files' regex and fails if any
        of them matches no file in the repository (i.e. is stale).
        """
        file_matchers = {}
        for pipeline in ("check", "gate"):
            for job in self.project_cfg[pipeline]["jobs"]:
                job_name, job_cfg = self._parse_job(job)
                if job_cfg is None:
                    continue
                if pipeline == "gate":
                    # Gate jobs may only restrict files, nothing else.
                    params = set(job_cfg) - self.JOB_FILES_PARAMS
                    if params:
                        self.fail(
                            f"Invalid parameter(s) for '{job_name}' job at "
                            f"gate pipeline: {', '.join(params)}.")
                for param in self.JOB_FILES_PARAMS:
                    if param in job_cfg:
                        for file_matcher in job_cfg[param]:
                            # Remember every place a matcher is used so a
                            # failure message can name the offending job.
                            file_matchers.setdefault(
                                file_matcher,
                                {
                                    "matcher": re.compile(file_matcher),
                                    "used_by": []
                                }
                            )
                            file_matchers[file_matcher]["used_by"].append(
                                {
                                    "pipeline": pipeline,
                                    "job": job_name,
                                    "param": param
                                }
                            )
        not_matched = set(file_matchers)
        # Walk the repository and tick off every matcher that hits at
        # least one real file.
        for dir_name, _, files in os.walk(self.root_dir):
            dir_name = os.path.relpath(dir_name, self.root_dir)
            if dir_name in (".tox", ".git"):
                continue
            for f in files:
                full_path = os.path.join(dir_name, f)
                for key in list(not_matched):
                    if file_matchers[key]["matcher"].match(full_path):
                        not_matched.remove(key)
                if not not_matched:
                    # stop iterating files if no more matchers to check
                    break
            if not not_matched:
                # stop iterating files if no more matchers to check
                break
        # Anything left over matched nothing -> stale matcher.
        for key in not_matched:
            user = file_matchers[key]["used_by"][0]
            self.fail(
                f"'{user['job']}' job configuration for "
                f"'{user['pipeline']}' pipeline includes wrong "
                f"matcher '{key}' at '{user['param']}'."
            )
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,660
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/swift/objects.py
|
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.swift import utils
"""Scenarios for Swift Objects."""
@validation.add("required_services", services=[consts.Service.SWIFT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["swift"]},
    name="SwiftObjects.create_container_and_object_then_list_objects",
    platform="openstack")
class CreateContainerAndObjectThenListObjects(utils.SwiftScenario):

    def run(self, objects_per_container=1, object_size=1024, **kwargs):
        """Create a container, upload objects into it, then list them.

        :param objects_per_container: int, number of objects to upload
        :param object_size: int, temporary local object size
        :param kwargs: dict, optional parameters to create container
        """
        with tempfile.TemporaryFile() as payload:
            # A sparse temp file of the requested size serves as payload.
            payload.truncate(object_size)
            container = self._create_container(**kwargs)
            for _ in range(objects_per_container):
                payload.seek(0)
                self._upload_object(container, payload)
        self._list_objects(container)
@validation.add("required_services", services=[consts.Service.SWIFT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["swift"]},
    name="SwiftObjects.create_container_and_object_then_delete_all",
    platform="openstack")
class CreateContainerAndObjectThenDeleteAll(utils.SwiftScenario):

    def run(self, objects_per_container=1, object_size=1024, **kwargs):
        """Create a container with objects, then delete everything created.

        :param objects_per_container: int, number of objects to upload
        :param object_size: int, temporary local object size
        :param kwargs: dict, optional parameters to create container
        """
        container = None
        uploaded = []
        with tempfile.TemporaryFile() as payload:
            # A sparse temp file of the requested size serves as payload.
            payload.truncate(object_size)
            container = self._create_container(**kwargs)
            for _ in range(objects_per_container):
                payload.seek(0)
                # _upload_object returns a tuple; index 1 is the name.
                uploaded.append(self._upload_object(container, payload)[1])
        for obj_name in uploaded:
            self._delete_object(container, obj_name)
        self._delete_container(container)
@validation.add("required_services", services=[consts.Service.SWIFT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["swift"]},
    name="SwiftObjects.create_container_and_object_then_download_object",
    platform="openstack")
class CreateContainerAndObjectThenDownloadObject(utils.SwiftScenario):

    def run(self, objects_per_container=1, object_size=1024, **kwargs):
        """Create a container with objects, then download all of them.

        :param objects_per_container: int, number of objects to upload
        :param object_size: int, temporary local object size
        :param kwargs: dict, optional parameters to create container
        """
        container = None
        uploaded = []
        with tempfile.TemporaryFile() as payload:
            # A sparse temp file of the requested size serves as payload.
            payload.truncate(object_size)
            container = self._create_container(**kwargs)
            for _ in range(objects_per_container):
                payload.seek(0)
                # _upload_object returns a tuple; index 1 is the name.
                uploaded.append(self._upload_object(container, payload)[1])
        for obj_name in uploaded:
            self._download_object(container, obj_name)
@validation.add("required_services", services=[consts.Service.SWIFT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"swift_objects@openstack": {}},
    name="SwiftObjects.list_objects_in_containers",
    platform="openstack")
class ListObjectsInContainers(utils.SwiftScenario):

    def run(self):
        """List the objects of every existing container."""
        # _list_containers returns a tuple; index 1 holds the containers.
        for container in self._list_containers()[1]:
            self._list_objects(container["name"])
@validation.add("required_services", services=[consts.Service.SWIFT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"swift_objects@openstack": {}},
    name="SwiftObjects.list_and_download_objects_in_containers",
    platform="openstack")
class ListAndDownloadObjectsInContainers(utils.SwiftScenario):

    def run(self):
        """List the objects of every container, then download them all."""
        containers = self._list_containers()[1]
        # First collect the listing per container, then download, so the
        # listing calls all happen before any download call.
        listing = {
            c["name"]: self._list_objects(c["name"])[1] for c in containers
        }
        for container_name, objs in listing.items():
            for obj in objs:
                self._download_object(container_name, obj["name"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,661
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/cinder/volume_types.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
LOG = logging.getLogger(__name__)
"""Scenarios for Cinder Volume Type."""
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_delete_volume_type",
                    platform="openstack")
class CreateAndDeleteVolumeType(cinder_utils.CinderBasic):

    def run(self, description=None, is_public=True):
        """Create a volume type, then delete it.

        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        vtype = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public)
        self.admin_cinder.delete_volume_type(vtype)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_get_volume_type",
                    platform="openstack")
class CreateAndGetVolumeType(cinder_utils.CinderBasic):

    def run(self, description=None, is_public=True):
        """Create a volume type, then fetch its details.

        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        vtype = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public)
        self.admin_cinder.get_volume_type(vtype)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_api_versions", component="cinder",
                versions=["2", "3"])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_update_volume_type",
                    platform="openstack")
class CreateAndUpdateVolumeType(cinder_utils.CinderBasic):

    def run(self, description=None, is_public=True, update_name=False,
            update_description=None, update_is_public=None):
        """Create a volume type, then update it.

        :param description: Description of the volume type
        :param is_public: Volume type visibility
        :param update_name: if True, can update name by generating random name.
                            if False, don't update name.
        :param update_description: a description to set while update
        :param update_is_public: update Volume type visibility
        """
        vtype = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public)
        new_name = None
        if update_name:
            new_name = self.generate_random_name()
        elif not (update_description or update_is_public):
            LOG.warning("Something should be updated.")
            # transmit at least some value to update api call
            new_name = vtype.name
        new_is_public = not is_public if update_is_public else None
        self.admin_cinder.update_volume_type(
            vtype,
            name=new_name,
            description=update_description,
            is_public=new_is_public)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_list_volume_types",
                    platform="openstack")
class CreateAndListVolumeTypes(cinder_utils.CinderBasic):
    def run(self, description=None, is_public=True):
        """Create a volume Type, then list all types.

        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        created = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public)
        listed = self.admin_cinder.list_types()
        # The freshly created type must show up in the listing, otherwise
        # this iteration fails with a descriptive message.
        msg = ("type not included into list of available types "
               "created type: {}\n"
               "pool of types: {}\n").format(created, listed)
        listed_ids = [vtype.id for vtype in listed]
        self.assertIn(created.id, listed_ids, err_msg=msg)
@validation.add("required_params", params=[("create_specs", "provider")])
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderVolumeTypes.create_volume_type_and_encryption_type",
    platform="openstack")
class CreateVolumeTypeAndEncryptionType(cinder_utils.CinderBasic):
    def run(self, create_specs=None, provider=None, cipher=None,
            key_size=None, control_location="front-end", description=None,
            is_public=True):
        """Create encryption type

        This scenario first creates a volume type, then creates an encryption
        type for the volume type.

        :param create_specs: The encryption type specifications to add.
                             DEPRECATED, specify arguments explicitly.
        :param provider: The class that provides encryption support. For
                         example, LuksEncryptor.
        :param cipher: The encryption algorithm or mode.
        :param key_size: Size of encryption key, in bits.
        :param control_location: Notional service where encryption is
                                 performed. Valid values are "front-end"
                                 or "back-end."
        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        volume_type = self.admin_cinder.create_volume_type(
            description=description,
            is_public=is_public)
        if create_specs is None:
            specs = {
                "provider": provider,
                "cipher": cipher,
                "key_size": key_size,
                "control_location": control_location
            }
        else:
            # Fix: the warning previously named a non-existent `create_spec`
            # argument; the deprecated parameter is `create_specs`.
            LOG.warning("The argument `create_specs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
            specs = create_specs
        self.admin_cinder.create_encryption_type(volume_type,
                                                 specs=specs)
@validation.add("required_params", params=[("create_specs", "provider")])
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_contexts", contexts="volume_types")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderVolumeTypes.create_and_list_encryption_type",
    platform="openstack")
class CreateAndListEncryptionType(cinder_utils.CinderBasic):
    def run(self, create_specs=None, provider=None, cipher=None,
            key_size=None, control_location="front-end", search_opts=None):
        """Create and list encryption type

        This scenario firstly creates an encryption type for a volume type
        taken from the ``volume_types`` context, then lists all encryption
        types.

        :param create_specs: The encryption type specifications to add.
                             DEPRECATED, specify arguments explicitly.
        :param provider: The class that provides encryption support. For
                         example, LuksEncryptor.
        :param cipher: The encryption algorithm or mode.
        :param key_size: Size of encryption key, in bits.
        :param control_location: Notional service where encryption is
                                 performed. Valid values are "front-end"
                                 or "back-end."
        :param search_opts: Options used when search for encryption types
        """
        # This scenario reads types prepared by the `volume_types` context,
        # so that context is now declared as required (the validator was
        # missing, unlike in the sibling encryption-type scenarios).
        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
        volume_type = self.context["volume_types"][vt_idx]
        if create_specs is None:
            specs = {
                "provider": provider,
                "cipher": cipher,
                "key_size": key_size,
                "control_location": control_location
            }
        else:
            # Fix: the warning previously named a non-existent `create_spec`
            # argument; the deprecated parameter is `create_specs`.
            LOG.warning("The argument `create_specs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
            specs = create_specs
        self.admin_cinder.create_encryption_type(volume_type["id"],
                                                 specs=specs)
        self.admin_cinder.list_encryption_type(search_opts)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_set_volume_type_keys",
                    platform="openstack")
class CreateAndSetVolumeTypeKeys(cinder_utils.CinderBasic):
    def run(self, volume_type_key, description=None, is_public=True):
        """Create and set a volume type's extra specs.

        :param volume_type_key: A dict of key/value pairs to be set
        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        # Create a fresh type, then attach the requested extra specs to it.
        new_type = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public)
        self.admin_cinder.set_volume_type_keys(
            new_type, metadata=volume_type_key)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_contexts", contexts="volume_types")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderVolumeTypes.create_get_and_delete_encryption_type",
    platform="openstack")
class CreateGetAndDeleteEncryptionType(cinder_utils.CinderBasic):
    def run(self, provider=None, cipher=None,
            key_size=None, control_location="front-end"):
        """Create get and delete an encryption type

        This scenario firstly creates an encryption type for a volume
        type created in the context, then gets detailed information of
        the created encryption type, finally deletes the created
        encryption type.

        :param provider: The class that provides encryption support. For
                         example, LuksEncryptor.
        :param cipher: The encryption algorithm or mode.
        :param key_size: Size of encryption key, in bits.
        :param control_location: Notional service where encryption is
                                 performed. Valid values are "front-end"
                                 or "back-end."
        """
        # Round-robin over the types prepared by the `volume_types` context
        # so concurrent iterations spread across them.
        available = self.context["volume_types"]
        type_id = available[self.context["iteration"] % len(available)]["id"]
        self.admin_cinder.create_encryption_type(
            type_id,
            specs={"provider": provider,
                   "cipher": cipher,
                   "key_size": key_size,
                   "control_location": control_location})
        self.admin_cinder.get_encryption_type(type_id)
        self.admin_cinder.delete_encryption_type(type_id)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_contexts", contexts="volume_types")
@validation.add("required_params", params=[("create_specs", "provider")])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderVolumeTypes.create_and_delete_encryption_type",
    platform="openstack")
class CreateAndDeleteEncryptionType(cinder_utils.CinderBasic):
    def run(self, create_specs=None, provider=None, cipher=None,
            key_size=None, control_location="front-end"):
        """Create and delete encryption type

        This scenario firstly creates an encryption type for a given
        volume type, then deletes the created encryption type.

        :param create_specs: The encryption type specifications to add.
                             DEPRECATED, specify arguments explicitly.
        :param provider: The class that provides encryption support. For
                         example, LuksEncryptor.
        :param cipher: The encryption algorithm or mode.
        :param key_size: Size of encryption key, in bits.
        :param control_location: Notional service where encryption is
                                 performed. Valid values are "front-end"
                                 or "back-end."
        """
        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
        volume_type = self.context["volume_types"][vt_idx]
        if create_specs is None:
            specs = {
                "provider": provider,
                "cipher": cipher,
                "key_size": key_size,
                "control_location": control_location
            }
        else:
            # Fix: the warning previously named a non-existent `create_spec`
            # argument; the deprecated parameter is `create_specs`.
            LOG.warning("The argument `create_specs` is deprecated since"
                        " Rally 0.10.0. Specify all arguments from it"
                        " explicitly.")
            specs = create_specs
        self.admin_cinder.create_encryption_type(volume_type["id"],
                                                 specs=specs)
        self.admin_cinder.delete_encryption_type(volume_type["id"])
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_contexts", contexts="volume_types")
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderVolumeTypes.create_and_update_encryption_type",
                    platform="openstack")
class CreateAndUpdateEncryptionType(cinder_utils.CinderBasic):
    def run(self, create_provider=None, create_cipher=None,
            create_key_size=None, create_control_location="front-end",
            update_provider=None, update_cipher=None,
            update_key_size=None, update_control_location=None):
        """Create and update encryption type

        This scenario firstly creates an encryption type for a volume type
        from the context, then updates the encryption type.

        :param create_provider: The class that provides encryption support. For
                                example, LuksEncryptor.
        :param create_cipher: The encryption algorithm or mode.
        :param create_key_size: Size of encryption key, in bits.
        :param create_control_location: Notional service where encryption is
                                        performed. Valid values are "front-end"
                                        or "back-end."
        :param update_provider: The class that provides encryption support. For
                                example, LuksEncryptor.
        :param update_cipher: The encryption algorithm or mode.
        :param update_key_size: Size of encryption key, in bits.
        :param update_control_location: Notional service where encryption is
                                        performed. Valid values are "front-end"
                                        or "back-end."
        """
        # NOTE(review): `services` was previously passed as the bare constant
        # instead of a list, unlike every other scenario in this file; the
        # validator iterates the value, so it is now wrapped in a list.
        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
        volume_type = self.context["volume_types"][vt_idx]
        create_specs = {
            "provider": create_provider,
            "cipher": create_cipher,
            "key_size": create_key_size,
            "control_location": create_control_location
        }
        update_specs = {
            "provider": update_provider,
            "cipher": update_cipher,
            "key_size": update_key_size,
            "control_location": update_control_location
        }
        self.admin_cinder.create_encryption_type(volume_type["id"],
                                                 specs=create_specs)
        self.admin_cinder.update_encryption_type(volume_type["id"],
                                                 specs=update_specs)
@validation.add("required_platform", platform="openstack", admin=True)
@validation.add("required_api_versions", component="cinder",
                versions=["2", "3"])
@validation.add("required_services", services=[consts.Service.CINDER])
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderVolumeTypes.create_volume_type_add_and_list_type_access",
    platform="openstack")
class CreateVolumeTypeAddAndListTypeAccess(cinder_utils.CinderBasic):
    def run(self, description=None, is_public=False):
        """Add and list volume type access for the given project.

        This scenario first creates a private volume type, then add project
        access and list project access to it.

        :param description: Description of the volume type
        :param is_public: Volume type visibility
        """
        # NOTE(review): `services` was previously passed as the bare constant
        # instead of a list, inconsistent with the rest of this file; the
        # validator iterates the value, so it is now wrapped in a list.
        volume_type = self.admin_cinder.create_volume_type(
            description=description, is_public=is_public
        )
        self.admin_cinder.add_type_access(
            volume_type, project=self.context["tenant"]["id"]
        )
        self.admin_cinder.list_type_access(volume_type)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,662
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/network/test_net_utils.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.common.services.network import net_utils
from tests.unit import test
PATH = "rally_openstack.common.services.network.net_utils"
class FunctionsTestCase(test.TestCase):

    def test_generate_cidr(self):
        """generate_cidr consumes the shared counter to pick the next net."""
        # With the default start network, each call bumps the third octet
        # of 10.2.0.0/24 by the next counter value.
        with mock.patch("%s._IPv4_CIDR_INCR" % PATH, iter(range(1, 4))):
            for expected in ("10.2.1.0/24", "10.2.2.0/24", "10.2.3.0/24"):
                self.assertEqual((4, expected), net_utils.generate_cidr())

        # With an explicit start_cidr, each increment advances by the size
        # of the prefix (a /26 holds 64 addresses).
        with mock.patch("%s._IPv4_CIDR_INCR" % PATH, iter(range(1, 4))):
            start_cidr = "1.1.0.0/26"
            for expected in ("1.1.0.64/26", "1.1.0.128/26", "1.1.0.192/26"):
                self.assertEqual(
                    (4, expected),
                    net_utils.generate_cidr(start_cidr=start_cidr))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,663
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/quotas/test_manila_quotas.py
|
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.quotas import manila_quotas
from tests.unit import test
class ManilaQuotasTestCase(test.TestCase):

    def test_update(self):
        """update() forwards the tenant id and quota values to manilaclient."""
        fake_clients = mock.MagicMock()
        quotas_ctx = manila_quotas.ManilaQuotas(fake_clients)
        project = mock.MagicMock()
        new_values = {
            "shares": 10,
            "gigabytes": 13,
            "snapshots": 7,
            "snapshot_gigabytes": 51,
            "share_networks": 1014,
        }
        quotas_ctx.update(project, **new_values)
        fake_clients.manila.return_value.quotas.update.assert_called_once_with(
            project, **new_values)

    def test_delete(self):
        """delete() forwards the tenant id to manilaclient."""
        fake_clients = mock.MagicMock()
        quotas_ctx = manila_quotas.ManilaQuotas(fake_clients)
        project = mock.MagicMock()
        quotas_ctx.delete(project)
        fake_clients.manila.return_value.quotas.delete.assert_called_once_with(
            project)

    def test_get(self):
        """get() returns the known quota fields as a plain dict."""
        project = "tenant_id"
        expected = {"gigabytes": "gb", "snapshots": "ss", "shares": "v",
                    "snapshot_gigabytes": "sg", "share_networks": "sn"}
        fake_clients = mock.MagicMock()
        fake_clients.manila.return_value.quotas.get.return_value = (
            mock.MagicMock(**expected))
        quotas_ctx = manila_quotas.ManilaQuotas(fake_clients)
        self.assertEqual(expected, quotas_ctx.get(project))
        fake_clients.manila().quotas.get.assert_called_once_with(project)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,664
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/loadbalancer/test_octavia.py
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import fixtures
from rally.common import cfg
from rally import exceptions
from rally_openstack.common.services.loadbalancer import octavia
from tests.unit import test
BASE_PATH = "rally_openstack.common.services.loadbalancer"
CONF = cfg.CONF
class LoadBalancerServiceTestCase(test.TestCase):
def setUp(self):
super(LoadBalancerServiceTestCase, self).setUp()
self.clients = mock.MagicMock()
self.name_generator = mock.MagicMock()
self.service = octavia.Octavia(self.clients,
name_generator=self.name_generator)
self.mock_wait_for_status = fixtures.MockPatch(
"rally.task.utils.wait_for_status")
self.useFixture(self.mock_wait_for_status)
def _get_context(self):
context = test.get_test_context()
context.update({
"user": {
"id": "fake_user",
"tenant_id": "fake_tenant",
"credential": mock.MagicMock()
},
"tenant": {"id": "fake_tenant",
"networks": [{"id": "fake_net",
"subnets": ["fake_subnet"]}]}})
return context
def atomic_actions(self):
return self.service._atomic_actions
def test_load_balancer_list(self):
self.service.load_balancer_list(),
self.service._clients.octavia().load_balancer_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_list")
def test_load_balancer_show(self):
lb = {"id": "loadbalancer-id"}
self.service.load_balancer_show(lb["id"])
self.service._clients.octavia().load_balancer_show \
.assert_called_once_with(lb["id"])
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_show")
def test_load_balancer_show_fail_404(self):
fake_lb = {"id": "fake_lb"}
ex = Exception()
ex.code = 404
self.service._clients.octavia().load_balancer_show.side_effect = ex
self.assertRaises(
exceptions.GetResourceNotFound,
self.service.load_balancer_show, fake_lb["id"])
def test_load_balancer_show_resource_fail(self):
fake_lb = {"id": "fake_lb"}
ex = Exception()
self.service._clients.octavia().load_balancer_show.side_effect = ex
self.assertRaises(
exceptions.GetResourceFailure,
self.service.load_balancer_show, fake_lb["id"])
def test_load_balancer_create(self):
self.service.generate_random_name = mock.MagicMock(
return_value="lb")
self.service.load_balancer_create("subnet_id")
self.service._clients.octavia().load_balancer_create \
.assert_called_once_with(json={
"loadbalancer": {"name": "lb",
"admin_state_up": True,
"vip_qos_policy_id": None,
"listeners": None,
"project_id": None,
"provider": None,
"vip_subnet_id": "subnet_id",
"description": None}})
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_create")
def test_load_balancer_delete(self):
self.service.load_balancer_delete("lb-id")
self.service._clients.octavia().load_balancer_delete \
.assert_called_once_with("lb-id", cascade=False)
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_delete")
def test_load_balancer_set(self):
self.service.generate_random_name = mock.MagicMock(
return_value="new_lb")
lb_update_args = {"name": "new_lb_name"}
self.service.load_balancer_set(
"lb-id", lb_update_args=lb_update_args)
self.service._clients.octavia().load_balancer_set \
.assert_called_once_with(
"lb-id", json={"loadbalancer": {"name": "new_lb_name"}})
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_set")
def test_load_balancer_stats_show(self):
lb = {"id": "new_lb"}
self.assertEqual(
self.service.load_balancer_stats_show(lb, kwargs={}),
self.service._clients.octavia()
.load_balancer_stats_show.return_value)
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_stats_show")
def test_load_balancer_failover(self):
lb = {"id": "new_lb"}
self.service.load_balancer_failover(lb["id"])
self.service._clients.octavia().load_balancer_failover \
.assert_called_once_with(lb["id"])
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.load_balancer_failover")
def test_listener_list(self):
self.service.listener_list()
self.service._clients.octavia().listener_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_list")
def test_listener_show(self):
self.service.listener_show(listener_id="listener_id")
self.service._clients.octavia().listener_show \
.assert_called_once_with("listener_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_show")
def test_listener_create(self):
self.service.listener_create()
self.service._clients.octavia().listener_create \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_create")
def test_listener_delete(self):
self.service.listener_delete(listener_id="listener_id")
self.service._clients.octavia().listener_delete \
.assert_called_once_with("listener_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_delete")
def test_listener_set(self):
self.service.listener_set(listener_id="listener_id")
self.service._clients.octavia().listener_set \
.assert_called_once_with("listener_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_set")
def test_listener_stats_show(self):
self.service.listener_stats_show(listener_id="listener_id")
self.service._clients.octavia().listener_stats_show \
.assert_called_once_with("listener_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.listener_stats_show")
def test_pool_list(self):
self.service.pool_list()
self.service._clients.octavia().pool_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.pool_list")
def test_update_pool_resource(self):
fake_pool = {"id": "pool-id"}
self.service.update_pool_resource(fake_pool)
self.service._clients.octavia().pool_show \
.assert_called_once_with("pool-id")
def test_update_pool_resource_fail_404(self):
fake_pool = {"id": "pool-id"}
ex = Exception()
ex.status_code = 404
self.service._clients.octavia().pool_show.side_effect = ex
self.assertRaises(
exceptions.GetResourceNotFound,
self.service.update_pool_resource, fake_pool)
def test_update_pool_resource_fail(self):
fake_pool = {"id": "pool-id"}
ex = Exception()
self.service._clients.octavia().pool_show.side_effect = ex
self.assertRaises(
exceptions.GetResourceFailure,
self.service.update_pool_resource, fake_pool)
def test_pool_create(self):
self.service.generate_random_name = mock.MagicMock(
return_value="pool")
self.service.pool_create(
lb_id="loadbalancer-id",
protocol="HTTP",
lb_algorithm="ROUND_ROBIN")
self.service._clients.octavia().pool_create \
.assert_called_once_with(
json={"pool": {
"lb_algorithm": "ROUND_ROBIN",
"project_id": None,
"protocol": "HTTP",
"listener_id": None,
"description": None,
"admin_state_up": True,
"session_persistence": None,
"loadbalancer_id": "loadbalancer-id",
"name": "pool"}})
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.pool_create")
def test_pool_delete(self):
self.service.pool_delete(pool_id="fake_pool")
self.service._clients.octavia().pool_delete \
.assert_called_once_with("fake_pool")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.pool_delete")
def test_pool_show(self):
self.service.pool_show(pool_id="fake_pool")
self.service._clients.octavia().pool_show \
.assert_called_once_with("fake_pool")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.pool_show")
def test_pool_set(self):
pool_update_args = {"name": "new-pool-name"}
self.service.pool_set(
pool_id="fake_pool",
pool_update_args=pool_update_args)
self.service._clients.octavia().pool_set \
.assert_called_once_with(
"fake_pool",
json={"pool": {"name": "new-pool-name"}})
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.pool_set")
def test_member_list(self):
self.service.member_list(pool_id="fake_pool")
self.service._clients.octavia().member_list \
.assert_called_once_with("fake_pool")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.member_list")
def test_member_show(self):
self.service.member_show(pool_id="fake_pool", member_id="fake_member")
self.service._clients.octavia().member_show \
.assert_called_once_with("fake_pool", "fake_member")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.member_show")
def test_member_create(self):
self.service.member_create(pool_id="fake_pool")
self.service._clients.octavia().member_create \
.assert_called_once_with("fake_pool")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.member_create")
def test_member_delete(self):
self.service.member_delete(
pool_id="fake_pool", member_id="fake_member")
self.service._clients.octavia().member_delete \
.assert_called_once_with("fake_pool", "fake_member")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.member_delete")
def test_member_set(self):
self.service.member_set(pool_id="fake_pool", member_id="fake_member")
self.service._clients.octavia().member_set \
.assert_called_once_with("fake_pool", "fake_member")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.member_set")
def test_l7policy_list(self):
self.service.l7policy_list()
self.service._clients.octavia().l7policy_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7policy_list")
def test_l7policy_create(self):
self.service.l7policy_create()
self.service._clients.octavia().l7policy_create \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7policy_create")
def test_l7policy_delete(self):
self.service.l7policy_delete(l7policy_id="fake_policy")
self.service._clients.octavia().l7policy_delete \
.assert_called_once_with("fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7policy_delete")
def test_l7policy_show(self):
self.service.l7policy_show(l7policy_id="fake_policy")
self.service._clients.octavia().l7policy_show \
.assert_called_once_with("fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7policy_show")
def test_l7policy_set(self):
self.service.l7policy_set(l7policy_id="fake_policy")
self.service._clients.octavia().l7policy_set \
.assert_called_once_with("fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7policy_set")
def test_l7rule_list(self):
self.service.l7rule_list(l7policy_id="fake_policy")
self.service._clients.octavia().l7rule_list \
.assert_called_once_with("fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7rule_list")
def test_l7rule_create(self):
self.service.l7rule_create(l7policy_id="fake_policy")
self.service._clients.octavia().l7rule_create \
.assert_called_once_with("fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7rule_create")
def test_l7rule_delete(self):
self.service.l7rule_delete(
l7rule_id="fake_id", l7policy_id="fake_policy")
self.service._clients.octavia().l7rule_delete \
.assert_called_once_with("fake_id", "fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7rule_delete")
def test_l7rule_show(self):
self.service.l7rule_show(
l7rule_id="fake_id", l7policy_id="fake_policy")
self.service._clients.octavia().l7rule_show \
.assert_called_once_with("fake_id", "fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7rule_show")
def test_l7rule_set(self):
self.service.l7rule_set(l7rule_id="fake_id", l7policy_id="fake_policy")
self.service._clients.octavia().l7rule_set \
.assert_called_once_with("fake_id", "fake_policy")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.l7rule_set")
def test_health_monitor_list(self):
self.service.health_monitor_list()
self.service._clients.octavia().health_monitor_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.health_monitor_list")
def test_health_monitor_create(self):
self.service.health_monitor_create()
self.service._clients.octavia().health_monitor_create \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.health_monitor_create")
def test_health_monitor_delete(self):
self.service.health_monitor_delete(health_monitor_id="fake_monitor_id")
self.service._clients.octavia().health_monitor_delete \
.assert_called_once_with("fake_monitor_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.health_monitor_delete")
def test_health_monitor_show(self):
self.service.health_monitor_show(health_monitor_id="fake_monitor_id")
self.service._clients.octavia().health_monitor_show \
.assert_called_once_with("fake_monitor_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.health_monitor_show")
def test_health_monitor_set(self):
self.service.health_monitor_set(health_monitor_id="fake_monitor_id")
self.service._clients.octavia().health_monitor_set \
.assert_called_once_with("fake_monitor_id")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.health_monitor_set")
def test_quota_list(self):
self.service.quota_list(params="fake_params")
self.service._clients.octavia().quota_list \
.assert_called_once_with("fake_params")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.quota_list")
def test_quota_show(self):
self.service.quota_show(project_id="fake_project")
self.service._clients.octavia().quota_show \
.assert_called_once_with("fake_project")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.quota_show")
def test_quota_reset(self):
self.service.quota_reset(project_id="fake_project")
self.service._clients.octavia().quota_reset \
.assert_called_once_with("fake_project")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.quota_reset")
def test_quota_set(self):
self.service.quota_set(project_id="fake_project",
params="fake_params")
self.service._clients.octavia().quota_set \
.assert_called_once_with("fake_project", "fake_params")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.quota_set")
def test_quota_defaults_show(self):
self.service.quota_defaults_show()
self.service._clients.octavia().quota_defaults_show \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.quota_defaults_show")
def test_amphora_show(self):
self.service.amphora_show(amphora_id="fake_amphora")
self.service._clients.octavia().amphora_show \
.assert_called_once_with("fake_amphora")
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.amphora_show")
def test_amphora_list(self):
self.service.amphora_list()
self.service._clients.octavia().amphora_list \
.assert_called_once_with()
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.amphora_list")
@mock.patch("%s.Ocvita.wait_for_loadbalancer_prov_status" % BASE_PATH)
def wait_for_loadbalancer_prov_status(self, mock_wait_for_status):
fake_lb = {}
self.service.wait_for_loadbalancer_prov_status(lb=fake_lb)
self.assertTrue(mock_wait_for_status.called)
self._test_atomic_action_timer(self.atomic_actions(),
"octavia.wait_for_loadbalancers")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,665
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/sahara/jobs.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.sahara import utils
LOG = logging.getLogger(__name__)
@validation.add("required_services", services=[consts.Service.SAHARA])
@validation.add("required_contexts", contexts=["users", "sahara_image",
                                               "sahara_job_binaries",
                                               "sahara_cluster"])
@scenario.configure(context={"cleanup@openstack": ["sahara"]},
                    name="SaharaJob.create_launch_job",
                    platform="openstack")
class CreateLaunchJob(utils.SaharaScenario):

    def run(self, job_type, configs, job_idx=0):
        """Create and execute a Sahara EDP Job.

        This scenario creates a Job entity and launches an execution on a
        Cluster.

        :param job_type: type of the Data Processing Job
        :param configs: config dict that will be passed to a Job Execution
        :param job_idx: index of a job in a sequence. This index will be
                        used to create different atomic actions for each job
                        in a sequence
        """
        sahara_ctx = self.context["tenant"]["sahara"]
        job = self.clients("sahara").jobs.create(
            name=self.generate_random_name(),
            type=job_type,
            description="",
            mains=sahara_ctx["mains"],
            libs=sahara_ctx["libs"])
        if job_type.lower() == "java":
            # Java jobs take no input/output data sources.
            input_id = output_id = None
        else:
            input_id = sahara_ctx["input"]
            output_id = self._create_output_ds().id
        self._run_job_execution(job_id=job.id,
                                cluster_id=sahara_ctx["cluster"],
                                input_id=input_id,
                                output_id=output_id,
                                configs=configs,
                                job_idx=job_idx)
@validation.add("required_services", services=[consts.Service.SAHARA])
@validation.add("required_contexts", contexts=["users", "sahara_image",
                                               "sahara_job_binaries",
                                               "sahara_cluster"])
@scenario.configure(context={"cleanup@openstack": ["sahara"]},
                    name="SaharaJob.create_launch_job_sequence",
                    platform="openstack")
class CreateLaunchJobSequence(utils.SaharaScenario):

    def run(self, jobs):
        """Create and execute a sequence of the Sahara EDP Jobs.

        This scenario creates a Job entity and launches an execution on a
        Cluster for every job object provided.

        :param jobs: list of jobs that should be executed in one context
        """
        launch_job = CreateLaunchJob(self.context)
        for idx, job in enumerate(jobs):
            # Lazy %-style args: the message is only rendered when DEBUG
            # logging is enabled (was eagerly formatted with "%" before).
            LOG.debug("Launching Job. Sequence #%d", idx)
            launch_job.run(job["job_type"], job["configs"], idx)
@validation.add("required_services", services=[consts.Service.SAHARA])
@validation.add("required_contexts", contexts=["users", "sahara_image",
                                               "sahara_job_binaries",
                                               "sahara_cluster"])
@scenario.configure(context={"cleanup@openstack": ["sahara"]},
                    name="SaharaJob.create_launch_job_sequence_with_scaling",
                    platform="openstack")
class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario):

    def run(self, jobs, deltas):
        """Create and execute Sahara EDP Jobs on a scaling Cluster.

        This scenario creates a Job entity and launches an execution on a
        Cluster for every job object provided. The Cluster is scaled
        according to the deltas values and the sequence is launched again.

        :param jobs: list of jobs that should be executed in one context
        :param deltas: list of integers which will be used to add or
                       remove worker nodes from the cluster
        """
        cluster_id = self.context["tenant"]["sahara"]["cluster"]
        launch_job_sequence = CreateLaunchJobSequence(self.context)
        launch_job_sequence.run(jobs)
        for delta in deltas:
            if delta == 0:
                # Zero scaling makes no sense; skip before fetching the
                # cluster so no useless GET request is issued.
                continue
            # The Cluster is fetched every time so that its node groups have
            # correct 'count' values.
            cluster = self.clients("sahara").clusters.get(cluster_id)
            # Lazy %-style log args instead of eager "%" formatting.
            LOG.debug("Scaling cluster %s with delta %d",
                      cluster.name, delta)
            if delta > 0:
                self._scale_cluster_up(cluster, delta)
            else:
                self._scale_cluster_down(cluster, delta)
            LOG.debug("Starting Job sequence")
            launch_job_sequence.run(jobs)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,666
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/magnum/k8s_pods.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.magnum import utils
"""Scenarios for Kubernetes pods and rcs."""
@validation.add("required_services", services=consts.Service.MAGNUM)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="K8sPods.list_pods", platform="openstack")
class ListPods(utils.MagnumScenario):
    def run(self):
        """List all pods through the Magnum scenario helper."""
        self._list_v1pods()
@validation.add("required_services", services=consts.Service.MAGNUM)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="K8sPods.create_pods", platform="openstack")
class CreatePods(utils.MagnumScenario):

    def run(self, manifests):
        """Create pods from manifest files and wait for them to be ready.

        :param manifests: manifest files used to create the pods
        """
        for manifest_path in manifests:
            with open(manifest_path, "r") as manifest_file:
                pod_spec = yaml.safe_load(manifest_file.read())
            created_pod = self._create_v1pod(pod_spec)
            self.assertTrue(created_pod, err_msg="Pod isn't created")
@validation.add("required_services", services=consts.Service.MAGNUM)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="K8sPods.create_rcs", platform="openstack")
class CreateRcs(utils.MagnumScenario):

    def run(self, manifests):
        """Create replication controllers and wait for them to be ready.

        :param manifests: manifest files use to create the rcs
        """
        for manifest_path in manifests:
            with open(manifest_path, "r") as manifest_file:
                rc_spec = yaml.safe_load(manifest_file.read())
            created_rc = self._create_v1rc(rc_spec)
            self.assertTrue(created_rc, err_msg="RC isn't created")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,667
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/quotas/utils.py
|
# Copyright 2014: Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.task import atomic
from rally_openstack.task import scenario
class QuotasScenario(scenario.OpenStackScenario):
    """Base class for quotas scenarios with basic atomic actions."""

    @atomic.action_timer("quotas.update_quotas")
    def _update_quotas(self, component, tenant_id, max_quota=1024,
                       quota_update_fn=None):
        """Update a project's quotas with random values.

        :param component: Component for the quotas.
        :param tenant_id: The project_id for the quotas to be updated.
        :param max_quota: Max value to be updated for quota.
        :param quota_update_fn: Client quota update function.
            Standard OpenStack clients use quotas.update().
            Use `quota_update_fn` to override for non-standard clients.
        :returns: Updated quotas dictionary.
        """
        quotas = self._generate_quota_values(max_quota, component)
        if quota_update_fn:
            return quota_update_fn(tenant_id, **quotas)
        return self.admin_clients(component).quotas.update(tenant_id, **quotas)

    @atomic.action_timer("quotas.delete_quotas")
    def _delete_quotas(self, component, tenant_id):
        """Delete a project's quotas.

        :param component: Component for the quotas.
        :param tenant_id: The project_id for the quotas to be updated.
        """
        self.admin_clients(component).quotas.delete(tenant_id)

    def _generate_quota_values(self, max_quota, component):
        """Build a dict of random quota values for the given component.

        Each quota is drawn uniformly from [-1, max_quota]; -1 means
        "unlimited" in OpenStack quota APIs. Unknown components yield {}.
        """
        def rand():
            return random.randint(-1, max_quota)

        if component == "nova":
            nova_keys = ("metadata_items", "key_pairs",
                         "injected_file_content_bytes",
                         "injected_file_path_bytes", "ram", "instances",
                         "injected_files", "cores")
            return {key: rand() for key in nova_keys}
        if component == "cinder":
            cinder_keys = ("volumes", "snapshots", "gigabytes")
            return {key: rand() for key in cinder_keys}
        if component == "neutron":
            neutron_keys = ("network", "subnet", "port", "router",
                            "floatingip", "security_group",
                            "security_group_rule")
            # Neutron expects the values wrapped in a request body.
            return {"body": {"quota": {key: rand()
                                       for key in neutron_keys}}}
        return {}

    @atomic.action_timer("quotas.get_quotas")
    def _get_quotas(self, component, tenant_id):
        """Get quotas for a project.

        :param component: Openstack component for the quotas.
        :param tenant_id: The project_id for the quotas to show.
        :return: Get quotas for a project.
        """
        return self.admin_clients(component).quotas.get(tenant_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,668
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/senlin/profiles.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task import context
from rally_openstack.task.scenarios.senlin import utils as senlin_utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="profiles", platform="openstack", order=190)
class ProfilesGenerator(context.OpenStackContext):
    """Context creates a temporary profile for Senlin test."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "type": {
                "type": "string",
            },
            "version": {
                "type": "string",
            },
            "properties": {
                "type": "object",
                "additionalProperties": True,
            }
        },
        "additionalProperties": False,
        "required": ["type", "version", "properties"]
    }

    def _scenario_for(self, user):
        # Build a SenlinScenario bound to this user and the current task.
        return senlin_utils.SenlinScenario({
            "user": user,
            "task": self.context["task"],
        })

    def setup(self):
        """Create one test profile per tenant."""
        for user, tenant_id in self._iterate_per_tenants():
            profile = self._scenario_for(user)._create_profile(self.config)
            self.context["tenants"][tenant_id]["profile"] = profile.id

    def cleanup(self):
        """Delete the profiles created by setup()."""
        for user, tenant_id in self._iterate_per_tenants():
            self._scenario_for(user)._delete_profile(
                self.context["tenants"][tenant_id]["profile"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,669
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/authenticate/test_authenticate.py
|
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.authenticate import authenticate
from tests.unit import test
class AuthenticateTestCase(test.ScenarioTestCase):
    """Unit tests for the Authenticate scenarios.

    Each ``validate_*`` scenario is run 5 times; the tests then check
    that the expected client call was issued 5 times and that the
    scenario's atomic action timer was recorded.
    """
    def test_keystone(self):
        scenario_inst = authenticate.Keystone()
        scenario_inst.run()
        # Running the scenario must result in a keystone client being built.
        self.assertTrue(self.client_created("keystone"))
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.keystone")
    def test_validate_glance(self):
        scenario_inst = authenticate.ValidateGlance()
        scenario_inst.run(5)
        # NOTE(stpierre): We can't use assert_has_calls() here because
        # that includes calls on the return values of the mock object
        # as well. Glance (and Heat and Monasca, tested below) returns
        # an iterator that the scenario wraps in list() in order to
        # force glanceclient to actually make the API call, and this
        # results in a bunch of call().__iter__() and call().__len__()
        # calls that aren't matched if we use assert_has_calls().
        self.assertCountEqual(
            self.clients("glance").images.list.call_args_list,
            [mock.call(name=mock.ANY)] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_glance")
    def test_validate_nova(self):
        scenario_inst = authenticate.ValidateNova()
        scenario_inst.run(5)
        self.clients("nova").flavors.list.assert_has_calls([mock.call()] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_nova")
    def test_validate_ceilometer(self):
        scenario_inst = authenticate.ValidateCeilometer()
        scenario_inst.run(5)
        self.clients("ceilometer").meters.list.assert_has_calls(
            [mock.call()] * 5)
        self._test_atomic_action_timer(
            scenario_inst.atomic_actions(),
            "authenticate.validate_ceilometer")
    def test_validate_cinder(self):
        scenario_inst = authenticate.ValidateCinder()
        scenario_inst.run(5)
        self.clients("cinder").volume_types.list.assert_has_calls(
            [mock.call()] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_cinder")
    def test_validate_neutron(self):
        scenario_inst = authenticate.ValidateNeutron()
        scenario_inst.run(5)
        self.clients("neutron").list_networks.assert_has_calls(
            [mock.call()] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_neutron")
    def test_validate_octavia(self):
        scenario_inst = authenticate.ValidateOctavia()
        scenario_inst.run(5)
        self.clients("octavia").load_balancer_list.assert_has_calls(
            [mock.call()] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_octavia")
    def test_validate_heat(self):
        scenario_inst = authenticate.ValidateHeat()
        scenario_inst.run(5)
        # See the NOTE in test_validate_glance for why assertCountEqual
        # is used here instead of assert_has_calls().
        self.assertCountEqual(
            self.clients("heat").stacks.list.call_args_list,
            [mock.call(limit=0)] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_heat")
    def test_validate_monasca(self):
        scenario_inst = authenticate.ValidateMonasca()
        scenario_inst.run(5)
        # See the NOTE in test_validate_glance for why assertCountEqual
        # is used here instead of assert_has_calls().
        self.assertCountEqual(
            self.clients("monasca").metrics.list.call_args_list,
            [mock.call(limit=0)] * 5)
        self._test_atomic_action_timer(scenario_inst.atomic_actions(),
                                       "authenticate.validate_monasca")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,670
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/image/test_glance_v2.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import fixtures
from rally_openstack.common.services.image import glance_v2
from tests.unit import test
PATH = "rally_openstack.common.services.image"
@ddt.ddt
class GlanceV2ServiceTestCase(test.TestCase):
    """Unit tests for GlanceV2Service against a fully mocked glance client."""
    def setUp(self):
        super(GlanceV2ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        # self.gc is the mocked glanceclient instance the service talks to.
        self.gc = self.clients.glance.return_value
        self.name_generator = mock.MagicMock()
        self.service = glance_v2.GlanceV2Service(
            self.clients, name_generator=self.name_generator)
        # Patch rally's wait_for_status so no real polling happens.
        self.mock_wait_for_status = fixtures.MockPatch(
            "rally.task.utils.wait_for_status")
        self.useFixture(self.mock_wait_for_status)
    def _get_temp_file_name(self):
        # return a temp file that will be cleaned automatically
        temp_dir = self.useFixture(fixtures.TempDir())
        return temp_dir.join("temp-file-name")
    @ddt.data({"location": "image_location", "temp": False},
              {"location": "image location", "temp": True})
    @ddt.unpack
    @mock.patch("requests.get")
    def test_upload(self, mock_requests_get, location, temp):
        """upload_data streams the downloaded bytes into images.upload."""
        image_id = "foo"
        # override the location with a private temp file
        if temp:
            location = self._get_temp_file_name()
        # NOTE(review): the temp path is only joined, never created on
        # disk, so both variants appear to exercise the HTTP download
        # path -- confirm against GlanceV2Service.upload_data.
        self.service.upload_data(image_id, image_location=location)
        mock_requests_get.assert_called_once_with(location, stream=True,
                                                  verify=False)
        self.gc.images.upload.assert_called_once_with(
            image_id, mock_requests_get.return_value.raw)
    @mock.patch("%s.glance_v2.GlanceV2Service.upload_data" % PATH)
    def test_create_image(self, mock_upload_data):
        """create_image builds the image, waits for it, then uploads data."""
        image_name = "image_name"
        container_format = "container_format"
        disk_format = "disk_format"
        visibility = "public"
        properties = {"fakeprop": "fake"}
        location = "location"
        image = self.service.create_image(
            image_name=image_name,
            container_format=container_format,
            image_location=location,
            disk_format=disk_format,
            visibility=visibility,
            properties=properties)
        # Extra properties are flattened into the create() kwargs;
        # min_disk and min_ram default to 0.
        call_args = {"container_format": container_format,
                     "disk_format": disk_format,
                     "name": image_name,
                     "visibility": visibility,
                     "min_disk": 0,
                     "min_ram": 0,
                     "fakeprop": "fake"}
        self.gc.images.create.assert_called_once_with(**call_args)
        # The returned image is the one that reached the awaited status.
        self.assertEqual(image, self.mock_wait_for_status.mock.return_value)
        mock_upload_data.assert_called_once_with(
            self.mock_wait_for_status.mock.return_value.id,
            image_location=location)
    def test_update_image(self):
        image_id = "image_id"
        # The name generator supplies the fallback name when
        # image_name is None.
        image_name1 = self.name_generator.return_value
        image_name2 = "image_name"
        min_disk = 0
        min_ram = 0
        remove_props = None
        # case: image_name is None:
        call_args1 = {"image_id": image_id,
                      "name": image_name1,
                      "min_disk": min_disk,
                      "min_ram": min_ram,
                      "remove_props": remove_props}
        image1 = self.service.update_image(image_id=image_id,
                                           image_name=None,
                                           min_disk=min_disk,
                                           min_ram=min_ram,
                                           remove_props=remove_props)
        self.assertEqual(self.gc.images.update.return_value, image1)
        self.gc.images.update.assert_called_once_with(**call_args1)
        # case: image_name is not None:
        call_args2 = {"image_id": image_id,
                      "name": image_name2,
                      "min_disk": min_disk,
                      "min_ram": min_ram,
                      "remove_props": remove_props}
        image2 = self.service.update_image(image_id=image_id,
                                           image_name=image_name2,
                                           min_disk=min_disk,
                                           min_ram=min_ram,
                                           remove_props=remove_props)
        self.assertEqual(self.gc.images.update.return_value, image2)
        self.gc.images.update.assert_called_with(**call_args2)
    def test_list_images(self):
        # The service is expected to send a default status="active"
        # filter and materialize the glance iterator into a list.
        status = "active"
        kwargs = {"status": status}
        filters = {"filters": kwargs}
        self.gc.images.list.return_value = iter([1, 2, 3])
        self.assertEqual([1, 2, 3], self.service.list_images())
        self.gc.images.list.assert_called_once_with(**filters)
    def test_set_visibility(self):
        image_id = "image_id"
        # "shared" is the expected default visibility when none is passed.
        visibility = "shared"
        self.service.set_visibility(image_id=image_id)
        self.gc.images.update.assert_called_once_with(
            image_id,
            visibility=visibility)
    def test_deactivate_image(self):
        image_id = "image_id"
        self.service.deactivate_image(image_id)
        self.gc.images.deactivate.assert_called_once_with(image_id)
    def test_reactivate_image(self):
        image_id = "image_id"
        self.service.reactivate_image(image_id)
        self.gc.images.reactivate.assert_called_once_with(image_id)
@ddt.ddt
class UnifiedGlanceV2ServiceTestCase(test.TestCase):
    """Unit tests for the unified wrapper around GlanceV2Service."""
    def setUp(self):
        super(UnifiedGlanceV2ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.service = glance_v2.UnifiedGlanceV2Service(self.clients)
        # autospec ensures only real GlanceV2Service methods get stubbed.
        self.service._impl = mock.create_autospec(self.service._impl)
    @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH)
    def test_create_image(self, mock_image__unify_image):
        """create_image delegates to _impl and unifies the result."""
        image_name = "image_name"
        container_format = "container_format"
        image_location = "image_location"
        disk_format = "disk_format"
        visibility = "public"
        properties = {"fakeprop": "fake"}
        callargs = {"image_name": image_name,
                    "container_format": container_format,
                    "image_location": image_location,
                    "disk_format": disk_format,
                    "visibility": visibility,
                    "min_disk": 0,
                    "min_ram": 0,
                    "properties": properties}
        image = self.service.create_image(image_name=image_name,
                                          container_format=container_format,
                                          image_location=image_location,
                                          disk_format=disk_format,
                                          visibility=visibility,
                                          properties=properties)
        self.assertEqual(mock_image__unify_image.return_value, image)
        self.service._impl.create_image.assert_called_once_with(**callargs)
    @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH)
    def test_update_image(self, mock_image__unify_image):
        """update_image delegates to _impl with defaulted kwargs."""
        image_id = "image_id"
        image_name = "image_name"
        callargs = {"image_id": image_id,
                    "image_name": image_name,
                    "min_disk": 0,
                    "min_ram": 0,
                    "remove_props": None}
        image = self.service.update_image(image_id,
                                          image_name=image_name)
        self.assertEqual(mock_image__unify_image.return_value, image)
        self.service._impl.update_image.assert_called_once_with(**callargs)
    @mock.patch("%s.glance_common.UnifiedGlanceMixin._unify_image" % PATH)
    def test_list_images(self, mock_image__unify_image):
        """list_images unifies every image and defaults status to active."""
        images = [mock.MagicMock()]
        self.service._impl.list_images.return_value = images
        status = "active"
        self.assertEqual([mock_image__unify_image.return_value],
                         self.service.list_images(owner="foo",
                                                  visibility="shared"))
        self.service._impl.list_images.assert_called_once_with(
            status=status,
            visibility="shared",
            owner="foo"
        )
    def test_set_visibility(self):
        image_id = "image_id"
        visibility = "private"
        self.service.set_visibility(image_id=image_id, visibility=visibility)
        self.service._impl.set_visibility.assert_called_once_with(
            image_id=image_id, visibility=visibility)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,671
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/ui/charts/test_osprofilerchart.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime as dt
import os
from unittest import mock
from rally_openstack.task.ui.charts import osprofilerchart as osp_chart
from tests.unit import test
PATH = "rally_openstack.task.ui.charts.osprofilerchart"
CHART_PATH = "%s.OSProfilerChart" % PATH
class OSProfilerChartTestCase(test.TestCase):
def test__datetime_json_serialize(self):
ts = dt.datetime(year=2018, month=7, day=3, hour=2)
self.assertEqual("2018-07-03T02:00:00",
osp_chart._datetime_json_serialize(ts))
self.assertEqual("A", osp_chart._datetime_json_serialize("A"))
def test__return_raw_response_for_complete_data(self):
title = "TITLE"
trace_id = "trace-id"
r = osp_chart.OSProfilerChart._return_raw_response_for_complete_data(
{"title": title, "data": {"trace_id": trace_id}}
)
self.assertEqual(
{"title": title, "widget": "TextArea", "data": [trace_id]},
r
)
    def test__generate_osprofiler_report(self):
        """$LOCAL and $DATA placeholders in the report template get filled."""
        data = {"ts": dt.datetime(year=2018, month=7, day=3, hour=2)}
        mock_open = mock.mock_open(read_data="local=$LOCAL | data=$DATA")
        with mock.patch.object(osp_chart, "open", mock_open):
            r = osp_chart.OSProfilerChart._generate_osprofiler_report(data)
        # datetimes inside the payload must be serialized to ISO strings.
        self.assertEqual(
            "local=false | data={\n \"ts\": \"2018-07-03T02:00:00\"\n}",
            r
        )
        # The template must be read exactly once, from a real file on disk.
        self.assertEqual(1, mock_open.call_count)
        m_args, _m_kwargs = mock_open.call_args_list[0]
        self.assertTrue(os.path.exists(m_args[0]))
    def test__fetch_osprofiler_data(self):
        """Report is fetched via the osprofiler driver; errors yield None."""
        connection_str = "https://example.com"
        trace_id = "trace-id"
        # osprofiler.drivers is injected via sys.modules so the test does
        # not require the optional osprofiler package to be installed.
        mock_osp_drivers = mock.Mock()
        mock_osp_driver = mock_osp_drivers.base
        with mock.patch.dict(
                "sys.modules", {"osprofiler.drivers": mock_osp_drivers}):
            r = osp_chart.OSProfilerChart._fetch_osprofiler_data(
                connection_str, trace_id)
            self.assertIsNotNone(r)
        mock_osp_driver.get_driver.assert_called_once_with(connection_str)
        engine = mock_osp_driver.get_driver.return_value
        engine.get_report.assert_called_once_with(trace_id)
        self.assertEqual(engine.get_report.return_value, r)
        # Any driver failure must be swallowed and reported as None.
        mock_osp_driver.get_driver.side_effect = Exception("Something")
        with mock.patch.dict(
                "sys.modules", {"osprofiler.drivers": mock_osp_drivers}):
            r = osp_chart.OSProfilerChart._fetch_osprofiler_data(
                connection_str, trace_id)
            self.assertIsNone(r)
    @mock.patch("%s.charts.OutputEmbeddedExternalChart" % PATH)
    @mock.patch("%s.charts.OutputEmbeddedChart" % PATH)
    @mock.patch("%s._return_raw_response_for_complete_data" % CHART_PATH)
    @mock.patch("%s._fetch_osprofiler_data" % CHART_PATH)
    @mock.patch("%s._generate_osprofiler_report" % CHART_PATH)
    def test_render_complete_data(
            self, mock__generate_osprofiler_report,
            mock__fetch_osprofiler_data,
            mock__return_raw_response_for_complete_data,
            mock_output_embedded_chart,
            mock_output_embedded_external_chart
    ):
        """Exercise every branch of OSProfilerChart.render_complete_data."""
        trace_id = "trace-id"
        title = "TITLE"

        # case 1: no connection string, so data for a text chart (the raw
        # trace id) should be returned
        pdata = {"data": {"trace_id": trace_id}, "title": title}
        self.assertEqual(
            mock__return_raw_response_for_complete_data.return_value,
            osp_chart.OSProfilerChart.render_complete_data(
                copy.deepcopy(pdata))
        )
        mock__return_raw_response_for_complete_data.assert_called_once_with(
            pdata
        )

        mock__return_raw_response_for_complete_data.reset_mock()

        # case 2: check support for an old format when `trace_id` key is a
        # list; the helper should receive the unwrapped (first) trace id
        pdata = {"data": {"trace_id": [trace_id]}, "title": title}
        self.assertEqual(
            mock__return_raw_response_for_complete_data.return_value,
            osp_chart.OSProfilerChart.render_complete_data(
                copy.deepcopy(pdata))
        )
        pdata["data"]["trace_id"] = pdata["data"]["trace_id"][0]
        mock__return_raw_response_for_complete_data.assert_called_once_with(
            pdata
        )

        mock__return_raw_response_for_complete_data.reset_mock()

        # case 3: connection string is provided, but the osprofiler backend
        # is not available (fetch returns None) -> fall back to raw response
        mock__fetch_osprofiler_data.return_value = None
        pdata = {"data": {"trace_id": trace_id, "conn_str": "conn"},
                 "title": title}
        self.assertEqual(
            mock__return_raw_response_for_complete_data.return_value,
            osp_chart.OSProfilerChart.render_complete_data(
                copy.deepcopy(pdata))
        )
        mock__return_raw_response_for_complete_data.assert_called_once_with(
            pdata
        )

        mock__return_raw_response_for_complete_data.reset_mock()

        # case 4: connection string is provided and data can be fetched ->
        # a report is generated and embedded directly into the page
        mock__fetch_osprofiler_data.return_value = "OSP_DATA"
        mock__generate_osprofiler_report.return_value = "DD"
        pdata = {"data": {"trace_id": trace_id, "conn_str": "conn"},
                 "title": title}
        self.assertEqual(
            mock_output_embedded_chart.render_complete_data.return_value,
            osp_chart.OSProfilerChart.render_complete_data(
                copy.deepcopy(pdata))
        )
        mock_output_embedded_chart.render_complete_data.\
            assert_called_once_with({"title": "%s : %s" % (title, trace_id),
                                     "widget": "EmbeddedChart",
                                     "data": "DD"})
        self.assertFalse(mock__return_raw_response_for_complete_data.called)

        mock_output_embedded_chart.render_complete_data.reset_mock()

        # case 5: connection string is provided together with a workload id
        # and iteration, and a report directory is configured -> the report
        # is written to a file and referenced via an external chart
        pdata = {"data": {"trace_id": trace_id,
                          "conn_str": "conn",
                          "workload_uuid": "W_ID",
                          "iteration": 777},
                 "title": title}
        mock_open = mock.mock_open()
        with mock.patch.object(osp_chart, "open", mock_open):
            with mock.patch("%s.CONF.openstack" % PATH) as mock_cfg_os:
                mock_cfg_os.osprofiler_chart_mode = "/path"
                r = osp_chart.OSProfilerChart.render_complete_data(
                    copy.deepcopy(pdata))
        mock_external_chat = mock_output_embedded_external_chart
        self.assertEqual(
            mock_external_chat.render_complete_data.return_value,
            r
        )
        mock_external_chat.render_complete_data.\
            assert_called_once_with({"title": "%s : %s" % (title, trace_id),
                                     "widget": "EmbeddedChart",
                                     "data": "/path/w_W_ID-777.html"})
        self.assertFalse(mock__return_raw_response_for_complete_data.called)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,672
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/gnocchi/test_metric.py
|
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.gnocchi import metric
from tests.unit import test
class GnocchiMetricTestCase(test.ScenarioTestCase):

    def get_test_context(self):
        """Extend the base context with fake admin/user credentials."""
        context = super(GnocchiMetricTestCase, self).get_test_context()
        fake_admin = {"user_id": "fake", "credential": mock.MagicMock()}
        fake_user = {"user_id": "fake", "credential": mock.MagicMock()}
        context.update({"admin": fake_admin,
                        "user": fake_user,
                        "tenant": {"id": "fake"}})
        return context

    def setUp(self):
        super(GnocchiMetricTestCase, self).setUp()
        # replace the Gnocchi service client with a mock for every test
        service_patch = mock.patch(
            "rally_openstack.common.services.gnocchi.metric.GnocchiService")
        self.addCleanup(service_patch.stop)
        self.mock_metric = service_patch.start()

    def test_list_metric(self):
        service = self.mock_metric.return_value
        metric.ListMetric(self.context).run(limit=42)
        service.list_metric.assert_called_once_with(limit=42)

    def test_create_metric(self):
        service = self.mock_metric.return_value
        scenario = metric.CreateMetric(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run(archive_policy_name="foo", resource_id="123", unit="u")
        service.create_metric.assert_called_once_with(
            "name", archive_policy_name="foo", resource_id="123", unit="u")

    def test_create_delete_metric(self):
        service = self.mock_metric.return_value
        scenario = metric.CreateDeleteMetric(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run(archive_policy_name="bar", resource_id="123", unit="v")
        service.create_metric.assert_called_once_with(
            "name", archive_policy_name="bar", resource_id="123", unit="v")
        # the metric created above must also have been deleted exactly once
        self.assertEqual(1, service.delete_metric.call_count)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,673
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/rally_jobs/test_jobs.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import traceback
from unittest import mock
from rally import api
from rally.cli import yamlutils as yaml
from rally.common.plugin import discover
from rally.task import engine
from rally.task import task_cfg
import rally_openstack
from tests.unit import fakes
from tests.unit import test
class RallyJobsTestCase(test.TestCase):
    """Validate the syntax of the task files shipped in rally-jobs/."""

    rally_jobs_path = os.path.join(
        os.path.dirname(rally_openstack.__file__), "..", "rally-jobs")

    def setUp(self):
        super(RallyJobsTestCase, self).setUp()
        # Tasks may reference files under ~/.rally/extra, so point HOME
        # at a temp dir pre-populated with the repo's "extra" directory.
        self.tmp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.tmp_dir, ".rally"))
        shutil.copytree(os.path.join(self.rally_jobs_path, "extra"),
                        os.path.join(self.tmp_dir, ".rally", "extra"))

        self.original_home = os.environ["HOME"]
        os.environ["HOME"] = self.tmp_dir

        def return_home():
            os.environ["HOME"] = self.original_home
        # cleanups run LIFO: HOME is restored before tmp_dir is removed
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self.addCleanup(return_home)

    def test_schema_is_valid(self):
        discover.load_plugins(os.path.join(self.rally_jobs_path, "plugins"))

        files = {f for f in os.listdir(self.rally_jobs_path)
                 if (os.path.isfile(os.path.join(self.rally_jobs_path, f))
                     and f.endswith(".yaml")
                     and not f.endswith("_args.yaml"))}

        # TODO(andreykurilin): figure out why it fails
        files -= {"rally-mos.yaml", "sahara-clusters.yaml"}

        for filename in files:
            full_path = os.path.join(self.rally_jobs_path, filename)

            with open(full_path) as task_file:
                try:
                    args_file = os.path.join(
                        self.rally_jobs_path,
                        filename.rsplit(".", 1)[0] + "_args.yaml")

                    args = {}
                    if os.path.exists(args_file):
                        # use a context manager so the file handle is not
                        # leaked (the original `open(...).read()` relied on
                        # GC to close it)
                        with open(args_file) as f:
                            args = yaml.safe_load(f.read())
                        if not isinstance(args, dict):
                            raise TypeError(
                                "args file %s must be dict in yaml or json "
                                "presentation" % args_file)

                    task_inst = api._Task(api.API(skip_db_check=True))
                    task = task_inst.render_template(
                        task_template=task_file.read(), **args)
                    task = task_cfg.TaskConfig(yaml.safe_load(task))
                    task_obj = fakes.FakeTask({"uuid": full_path})

                    eng = engine.TaskEngine(task, task_obj, mock.Mock())
                    eng.validate(only_syntax=True)
                except Exception:
                    print(traceback.format_exc())
                    self.fail("Wrong task input file: %s" % full_path)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,674
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/watcher/test_basic.py
|
# Copyright 2016: Servionica LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.watcher import basic
from tests.unit import test
class WatcherTestCase(test.ScenarioTestCase):

    def test_create_audit_template_and_delete(self):
        scenario = basic.CreateAuditTemplateAndDelete(self.context)
        fake_template = mock.Mock()
        scenario._create_audit_template = mock.MagicMock(
            return_value=fake_template)
        scenario._delete_audit_template = mock.MagicMock()

        scenario.run("goal", "strategy")

        scenario._create_audit_template.assert_called_once_with(
            "goal", "strategy")
        # deletion happens by the uuid of the freshly created template
        scenario._delete_audit_template.assert_called_once_with(
            fake_template.uuid)

    def test_list_audit_template(self):
        scenario = basic.ListAuditTemplates(self.context)
        scenario._list_audit_templates = mock.MagicMock()

        scenario.run()

        # all filters default to None/False when none are supplied
        scenario._list_audit_templates.assert_called_once_with(
            detail=False, goal=None, limit=None, name=None, sort_dir=None,
            sort_key=None, strategy=None)

    def test_create_audit_and_delete(self):
        fake_audit = mock.MagicMock()
        scenario = basic.CreateAuditAndDelete(self.context)
        scenario.context = mock.MagicMock()
        scenario._create_audit = mock.MagicMock(return_value=fake_audit)
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_audit = mock.MagicMock()

        scenario.run()

        scenario._create_audit.assert_called_once_with(mock.ANY)
        scenario._delete_audit.assert_called_once_with(fake_audit)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,675
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/magnum/test_k8s_pods.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally import exceptions
from rally_openstack.task.scenarios.magnum import k8s_pods
from tests.unit import test
@ddt.ddt
class K8sPodsTestCase(test.ScenarioTestCase):

    def test_list_pods(self):
        scenario = k8s_pods.ListPods()
        scenario._list_v1pods = mock.Mock()

        scenario.run()

        scenario._list_v1pods.assert_called_once_with()

    @ddt.data(["manifest.json"], ["manifest.yaml"])
    def test_create_pods(self, manifests):
        manifest = manifests[0]
        scenario = k8s_pods.CreatePods()
        # both serializations decode to the same python structure
        if manifest == "manifest.json":
            file_content = "{\"data\": \"fake_content\"}"
        else:
            file_content = "data: fake_content"
        file_mock = mock.mock_open(read_data=file_content)

        scenario._create_v1pod = mock.MagicMock(return_value=mock.Mock())
        with mock.patch(
                "rally_openstack.task.scenarios.magnum.k8s_pods.open",
                file_mock, create=True) as m:
            scenario.run(manifests)
        m.assert_called_once_with(manifest, "r")
        m.return_value.read.assert_called_once_with()
        scenario._create_v1pod.assert_called_once_with(
            {"data": "fake_content"})

        # error case: the pod was not created
        scenario._create_v1pod = mock.MagicMock(return_value=None)
        with mock.patch(
                "rally_openstack.task.scenarios.magnum.k8s_pods.open",
                file_mock, create=True) as m:
            self.assertRaises(
                exceptions.RallyAssertionError,
                scenario.run, manifests)
        m.assert_called_with(manifest, "r")
        m.return_value.read.assert_called_with()
        scenario._create_v1pod.assert_called_with(
            {"data": "fake_content"})

    @ddt.data(["manifest.json"], ["manifest.yaml"])
    def test_create_rcs(self, manifests):
        manifest = manifests[0]
        scenario = k8s_pods.CreateRcs()
        # both serializations decode to the same python structure
        if manifest == "manifest.json":
            file_content = "{\"data\": \"fake_content\"}"
        else:
            file_content = "data: fake_content"
        file_mock = mock.mock_open(read_data=file_content)

        scenario._create_v1rc = mock.MagicMock(return_value=mock.Mock())
        with mock.patch(
                "rally_openstack.task.scenarios.magnum.k8s_pods.open",
                file_mock, create=True) as m:
            scenario.run(manifests)
        m.assert_called_once_with(manifest, "r")
        m.return_value.read.assert_called_once_with()
        scenario._create_v1rc.assert_called_once_with({"data": "fake_content"})

        # error case: the replication controller was not created
        scenario._create_v1rc = mock.MagicMock(return_value=None)
        with mock.patch(
                "rally_openstack.task.scenarios.magnum.k8s_pods.open",
                file_mock, create=True) as m:
            self.assertRaises(
                exceptions.RallyAssertionError,
                scenario.run, manifests)
        m.assert_called_with(manifest, "r")
        m.return_value.read.assert_called_with()
        scenario._create_v1rc.assert_called_with({"data": "fake_content"})
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,676
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/verification/tempest/test_manager.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from unittest import mock
from rally import exceptions
from rally_openstack.verification.tempest import manager
from tests.unit import test
PATH = "rally_openstack.verification.tempest.manager"
class TempestManagerTestCase(test.TestCase):
    """Unit tests for the Tempest verifier manager."""

    def test_run_environ_property(self):
        # NOTE: the patch has to be stopped after the test; the original
        # code started it with mock.patch(...).start() without a matching
        # stop(), leaking the fake run_environ into later tests.
        patcher = mock.patch("%s.testr.TestrLauncher.run_environ" % PATH,
                             new={"some": "key"})
        patcher.start()
        self.addCleanup(patcher.stop)
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        env = {"some": "key",
               "OS_TEST_PATH": os.path.join(tempest.repo_dir,
                                            "tempest/test_discover"),
               "TEMPEST_CONFIG": "tempest.conf",
               "TEMPEST_CONFIG_DIR": os.path.dirname(tempest.configfile)}
        self.assertEqual(env, tempest.run_environ)

    def test_configfile_property(self):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        self.assertEqual(os.path.join(tempest.home_dir, "tempest.conf"),
                         tempest.configfile)

    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    def test_get_configuration(self, mock_open):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        tempest.get_configuration()
        mock_open.assert_called_once_with(tempest.configfile)
        mock_open.side_effect().read.assert_called_once_with()

    @mock.patch("%s.config.TempestConfigfileManager" % PATH)
    def test_configure(self, mock_tempest_configfile_manager):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        cm = mock_tempest_configfile_manager.return_value
        extra_options = mock.Mock()
        self.assertEqual(cm.create.return_value,
                         tempest.configure(extra_options))
        mock_tempest_configfile_manager.assert_called_once_with(
            tempest.verifier.env)
        cm.create.assert_called_once_with(tempest.configfile, extra_options)

    @mock.patch("%s.config.os.path.exists" % PATH)
    def test_is_configured(self, mock_exists):
        # os.path.exists is mocked to a truthy Mock, so the configfile
        # "exists"
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        self.assertTrue(tempest.is_configured())

    @mock.patch("rally.verification.utils.extend_configfile")
    def test_extend_configuration(self, mock_extend_configfile):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        extra_options = mock.Mock()
        self.assertEqual(mock_extend_configfile.return_value,
                         tempest.extend_configuration(extra_options))
        mock_extend_configfile.assert_called_once_with(extra_options,
                                                       tempest.configfile)

    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    def test_override_configuration(self, mock_open):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        new_content = mock.Mock()
        tempest.override_configuration(new_content)
        mock_open.assert_called_once_with(tempest.configfile, "w")
        mock_open.side_effect().write.assert_called_once_with(new_content)

    @mock.patch("%s.os.path.exists" % PATH)
    @mock.patch("%s.utils.check_output" % PATH)
    @mock.patch("%s.TempestManager.check_system_wide" % PATH)
    def test_install_extension(self, mock_check_system_wide, mock_check_output,
                               mock_exists):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd",
                                                        system_wide=True))

        # extra installation settings are not supported
        e = self.assertRaises(NotImplementedError, tempest.install_extension,
                              None, None, {"key": "value"})
        self.assertIn("verifiers don't support extra installation settings",
                      "%s" % e)

        test_reqs_path = os.path.join(tempest.base_dir, "extensions",
                                      "example", "test-requirements.txt")

        # case #1 system-wide installation
        source = "https://github.com/example/example"
        tempest.install_extension(source)
        path = os.path.join(tempest.base_dir, "extensions")
        mock_check_output.assert_called_once_with(
            ["pip", "install", "--no-deps", "--src", path, "-e",
             "git+https://github.com/example/example@master#egg=example"],
            cwd=tempest.base_dir, env=tempest.environ)
        mock_check_system_wide.assert_called_once_with(
            reqs_file_path=test_reqs_path)

        mock_check_output.reset_mock()

        # case #2 virtual env with specified version
        tempest.verifier.system_wide = False
        version = "some"
        tempest.install_extension(source, version=version)
        self.assertEqual([
            mock.call([
                "pip", "install", "--src", path, "-e",
                "git+https://github.com/example/example@some#egg=example"],
                cwd=tempest.base_dir, env=tempest.environ),
            mock.call(["pip", "install", "-r", test_reqs_path],
                      cwd=tempest.base_dir, env=tempest.environ)],
            mock_check_output.call_args_list)

    @mock.patch("%s.utils.check_output" % PATH)
    def test_list_extensions(self, mock_check_output):
        plugins_list = [
            {"name": "some", "entry_point": "foo.bar", "location": "/tmp"},
            {"name": "another", "entry_point": "bar.foo", "location": "/tmp"}
        ]
        mock_check_output.return_value = json.dumps(plugins_list)
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        self.assertEqual(plugins_list, tempest.list_extensions())
        self.assertEqual(1, mock_check_output.call_count)

        mock_check_output.reset_mock()
        # a failure of the underlying command becomes a RallyException
        mock_check_output.side_effect = subprocess.CalledProcessError("", "")
        self.assertRaises(exceptions.RallyException, tempest.list_extensions)
        self.assertEqual(1, mock_check_output.call_count)

    @mock.patch("%s.TempestManager.list_extensions" % PATH)
    @mock.patch("%s.os.path.exists" % PATH)
    @mock.patch("%s.shutil.rmtree" % PATH)
    def test_uninstall_extension(self, mock_rmtree, mock_exists,
                                 mock_list_extensions):
        plugins_list = [
            {"name": "some", "entry_point": "foo.bar", "location": "/tmp"},
            {"name": "another", "entry_point": "bar.foo", "location": "/tmp"}
        ]
        mock_list_extensions.return_value = plugins_list
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))

        tempest.uninstall_extension("some")
        mock_rmtree.assert_called_once_with(plugins_list[0]["location"])
        mock_list_extensions.assert_called_once_with()

        mock_rmtree.reset_mock()
        mock_list_extensions.reset_mock()

        # unknown extensions cannot be removed
        self.assertRaises(exceptions.RallyException,
                          tempest.uninstall_extension, "unexist")
        mock_list_extensions.assert_called_once_with()
        self.assertFalse(mock_rmtree.called)

    @mock.patch("%s.TempestManager._transform_pattern" % PATH)
    @mock.patch("%s.testr.TestrLauncher.list_tests" % PATH)
    def test_list_tests(self, mock_testr_launcher_list_tests,
                        mock__transform_pattern):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))

        # without a pattern nothing should be transformed
        self.assertEqual(mock_testr_launcher_list_tests.return_value,
                         tempest.list_tests())
        mock_testr_launcher_list_tests.assert_called_once_with("")
        self.assertFalse(mock__transform_pattern.called)

        mock_testr_launcher_list_tests.reset_mock()

        # a supplied pattern is transformed before being passed on
        pattern = mock.Mock()
        self.assertEqual(mock_testr_launcher_list_tests.return_value,
                         tempest.list_tests(pattern))
        mock_testr_launcher_list_tests.assert_called_once_with(
            mock__transform_pattern.return_value)
        mock__transform_pattern.assert_called_once_with(pattern)

    @mock.patch("%s.testr.TestrLauncher.validate_args" % PATH)
    def test_validate_args(self, mock_testr_launcher_validate_args):
        tm = manager.TempestManager(mock.Mock())

        # valid patterns: regexps and the known test sets
        tm.validate_args({})
        tm.validate_args({"pattern": "some.test"})
        tm.validate_args({"pattern": "set=smoke"})
        tm.validate_args({"pattern": "set=compute"})
        tm.validate_args({"pattern": "set=full"})

        e = self.assertRaises(exceptions.ValidationError, tm.validate_args,
                              {"pattern": "foo=bar"})
        self.assertEqual("Validation error: 'pattern' argument should be a "
                         "regexp or set name (format: 'tempest.api.identity."
                         "v3', 'set=smoke').", "%s" % e)

        e = self.assertRaises(exceptions.ValidationError, tm.validate_args,
                              {"pattern": "set=foo"})
        self.assertIn("Test set 'foo' not found in available Tempest test "
                      "sets. Available sets are ", "%s" % e)

    def test__transform_pattern(self):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))
        self.assertEqual("foo", tempest._transform_pattern("foo"))
        self.assertEqual("foo=bar", tempest._transform_pattern("foo=bar"))
        self.assertEqual("", tempest._transform_pattern("set=full"))
        self.assertEqual("smoke", tempest._transform_pattern("set=smoke"))
        self.assertEqual("tempest.bar", tempest._transform_pattern("set=bar"))
        self.assertEqual("tempest.api.compute",
                         tempest._transform_pattern("set=compute"))

    @mock.patch("%s.TempestManager._transform_pattern" % PATH)
    def test_prepare_run_args(self, mock__transform_pattern):
        tempest = manager.TempestManager(mock.MagicMock(uuid="uuuiiiddd"))

        # args without a pattern pass through untouched
        self.assertEqual({}, tempest.prepare_run_args({}))
        self.assertFalse(mock__transform_pattern.called)

        self.assertEqual({"foo": "bar"},
                         tempest.prepare_run_args({"foo": "bar"}))
        self.assertFalse(mock__transform_pattern.called)

        # only the "pattern" key is rewritten
        pattern = mock.Mock()
        self.assertEqual({"pattern": mock__transform_pattern.return_value},
                         tempest.prepare_run_args({"pattern": pattern}))
        mock__transform_pattern.assert_called_once_with(pattern)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,677
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/loadbalancer/test_loadbalancers.py
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.octavia import loadbalancers
from tests.unit import test
class LoadBalancersTestCase(test.ScenarioTestCase):
    """Tests for the Octavia load-balancer scenarios.

    NOTE(review): in the tests below the ``load_balancer_create`` mock is
    configured and invoked *after* ``scenario.run()`` has already executed,
    so those statements do not influence what ``run()`` observed; presumably
    they were meant to precede ``run()`` -- confirm before relying on them.
    """

    def setUp(self):
        super(LoadBalancersTestCase, self).setUp()
        # replace the Octavia service client with a mock for every test
        patch = mock.patch(
            "rally_openstack.common.services.loadbalancer.octavia.Octavia")
        self.addCleanup(patch.stop)
        self.mock_loadbalancers = patch.start()

    def _get_context(self):
        # context with a single user/tenant and one network with one
        # subnet for the scenarios to pick the VIP subnet from
        context = super(LoadBalancersTestCase, self).get_test_context()
        context.update({
            "user": {
                "id": "fake_user",
                "tenant_id": "fake_tenant",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant",
                       "networks": [{"id": "fake_net",
                                     "subnets": ["fake_subnet"]}]}})
        return context

    def test_create_and_list_loadbalancers(self):
        loadbalancer_service = self.mock_loadbalancers.return_value
        scenario = loadbalancers.CreateAndListLoadbalancers(
            self._get_context())
        scenario.run()
        loadbalancer_service.load_balancer_list.assert_called_once_with()

    def test_create_and_delete_loadbalancers(self):
        loadbalancer_service = self.mock_loadbalancers.return_value
        scenario = loadbalancers.CreateAndDeleteLoadbalancers(
            self._get_context())
        scenario.run()
        # NOTE(review): everything below runs after run() -- see class note
        lb = [{
            "loadbalancer": {
                "id": "loadbalancer-id"
            }
        }]
        loadbalancer_service.load_balancer_create.return_value = lb
        loadbalancer_service.load_balancer_create(
            admin_state=True, description=None, flavor_id=None,
            listeners=None, provider=None,
            subnet_id="fake_subnet", vip_qos_policy_id=None)
        self.assertEqual(1,
                         loadbalancer_service.load_balancer_delete.call_count)

    def test_create_and_update_loadbalancers(self):
        loadbalancer_service = self.mock_loadbalancers.return_value
        scenario = loadbalancers.CreateAndUpdateLoadBalancers(
            self._get_context())
        scenario.run()
        # NOTE(review): everything below runs after run() -- see class note
        lb = [{
            "loadbalancer": {
                "id": "loadbalancer-id"
            }
        }]
        loadbalancer_service.load_balancer_create.return_value = lb
        loadbalancer_service.load_balancer_create(
            admin_state=True, description=None, flavor_id=None,
            listeners=None, provider=None,
            subnet_id="fake_subnet", vip_qos_policy_id=None)
        self.assertEqual(1,
                         loadbalancer_service.load_balancer_set.call_count)

    def test_create_and_show_stats(self):
        loadbalancer_service = self.mock_loadbalancers.return_value
        scenario = loadbalancers.CreateAndShowStatsLoadBalancers(
            self._get_context())
        scenario.run()
        # NOTE(review): everything below runs after run() -- see class note
        lb = [{
            "loadbalancer": {
                "id": "loadbalancer-id"
            }
        }]
        loadbalancer_service.load_balancer_create.return_value = lb
        loadbalancer_service.load_balancer_create(
            admin_state=True, description=None, flavor_id=None,
            listeners=None, provider=None,
            subnet_id="fake_subnet", vip_qos_policy_id=None)
        self.assertEqual(
            1, loadbalancer_service.load_balancer_stats_show.call_count)

    def test_create_and_show_loadbalancers(self):
        loadbalancer_service = self.mock_loadbalancers.return_value
        scenario = loadbalancers.CreateAndShowLoadBalancers(
            self._get_context())
        scenario.run()
        # NOTE(review): everything below runs after run() -- see class note
        lb = [{
            "loadbalancer": {
                "id": "loadbalancer-id"
            }
        }]
        lb_show = {"id": "loadbalancer-id"}
        loadbalancer_service.load_balancer_create.return_value = lb
        loadbalancer_service.load_balancer_show.return_value = lb_show
        loadbalancer_service.load_balancer_create(
            admin_state=True, description=None, flavor_id=None,
            listeners=None, provider=None,
            subnet_id="fake_subnet", vip_qos_policy_id=None)
        self.assertEqual(1,
                         loadbalancer_service.load_balancer_show.call_count)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,678
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/barbican/test_containers.py
|
# Copyright 2018 Red Hat Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.barbican import containers
from tests.unit import test
class BarbicanContainersTestCase(test.ScenarioTestCase):
    """Unit tests for the Barbican container scenarios."""

    def get_test_context(self):
        """Extend the base test context with fake admin/user credentials."""
        context = super(BarbicanContainersTestCase, self).get_test_context()
        context.update({
            "admin": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "user": {
                "user_id": "fake",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake"}
        })
        return context

    def setUp(self):
        super(BarbicanContainersTestCase, self).setUp()
        m = "rally_openstack.common.services.key_manager.barbican"
        patch = mock.patch("%s.BarbicanService" % m)
        self.addCleanup(patch.stop)
        self.mock_secrets = patch.start()

    def test_list_containers(self):
        secrets_service = self.mock_secrets.return_value
        scenario = containers.BarbicanContainersList(self.context)
        scenario.run()
        secrets_service.list_container.assert_called_once_with()

    def test_generic_container_create_and_delete(self):
        secrets_service = self.mock_secrets.return_value
        # The scenario deletes the container returned by container_create.
        fake_container = secrets_service.container_create.return_value
        scenario = containers.BarbicanContainersGenericCreateAndDelete(
            self.context)
        scenario.run()
        secrets_service.container_create.assert_called_once_with()
        secrets_service.container_delete.assert_called_once_with(
            fake_container.container_ref)

    def test_generic_container_create_and_add_secret(self):
        secrets_service = self.mock_secrets.return_value
        fake_container = secrets_service.container_create.return_value
        fake_secrets = secrets_service.create_secret.return_value
        scenario = containers.BarbicanContainersGenericCreateAndAddSecret(
            self.context)
        scenario.run()
        secrets_service.create_secret.assert_called_once_with()
        secrets_service.container_create.assert_called_once_with(
            secrets={"secret": fake_secrets})
        secrets_service.container_delete.assert_called_once_with(
            fake_container.container_ref)

    def test_certificate_container_create_and_delete(self):
        secrets_service = self.mock_secrets.return_value
        fake_container = secrets_service.create_certificate_container \
            .return_value
        scenario = containers.BarbicanContainersCertificateCreateAndDelete(
            self.context)
        scenario.run()
        secrets_service.create_certificate_container.assert_called_once_with()
        secrets_service.container_delete.assert_called_once_with(
            fake_container.container_ref)

    def test_rsa_container_create_and_delete(self):
        secrets_service = self.mock_secrets.return_value
        fake_container = secrets_service.create_rsa_container.return_value
        scenario = containers.BarbicanContainersRSACreateAndDelete(
            self.context)
        scenario.run()
        secrets_service.create_rsa_container.assert_called_once_with()
        secrets_service.container_delete.assert_called_once_with(
            fake_container.container_ref)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,679
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/image/test_glance_v1.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import fixtures
from rally_openstack.common.services.image import glance_v1
from rally_openstack.common.services.image import image
from tests.unit import test
# Dotted path of the mixin helper that converts glance images to the unified
# representation; the tests below patch it to isolate delegation logic.
PATH = ("rally_openstack.common.services.image.glance_common."
        "UnifiedGlanceMixin._unify_image")
@ddt.ddt
class GlanceV1ServiceTestCase(test.TestCase):
    """Tests for glance_v1.GlanceV1Service against a fully mocked client."""

    def setUp(self):
        super(GlanceV1ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        # self.gc is the mocked glanceclient instance the service talks to.
        self.gc = self.clients.glance.return_value
        self.name_generator = mock.MagicMock()
        self.service = glance_v1.GlanceV1Service(
            self.clients, name_generator=self.name_generator)
        # Patch rally's polling helper so no test ever waits on image status.
        self.mock_wait_for_status = fixtures.MockPatch(
            "rally.task.utils.wait_for_status")
        self.useFixture(self.mock_wait_for_status)

    def _get_temp_file_name(self):
        # return a temp file that will be cleaned automatically
        temp_dir = self.useFixture(fixtures.TempDir())
        return temp_dir.join("temp-file-name")

    @ddt.data({"location": "image_location", "is_public": True, "temp": False},
              {"location": "image_location", "is_public": False, "temp": True})
    @ddt.unpack
    def test_create_image(self, location, is_public, temp):
        """create_image forwards args to glance and waits for the image."""
        image_name = "image_name"
        container_format = "container_format"
        disk_format = "disk_format"
        properties = {"fakeprop": "fake"}
        # override the location with a private temp file
        if temp:
            location = self._get_temp_file_name()
        image = self.service.create_image(
            image_name=image_name,
            container_format=container_format,
            image_location=location,
            disk_format=disk_format,
            is_public=is_public,
            properties=properties)
        call_args = {"container_format": container_format,
                     "disk_format": disk_format,
                     "is_public": is_public,
                     "name": image_name,
                     "min_disk": 0,
                     "min_ram": 0,
                     "properties": properties,
                     "copy_from": location}
        self.gc.images.create.assert_called_once_with(**call_args)
        # The service returns whatever wait_for_status produces.
        self.assertEqual(image, self.mock_wait_for_status.mock.return_value)

    @ddt.data({"image_name": None},
              {"image_name": "test_image_name"})
    @ddt.unpack
    def test_update_image(self, image_name):
        """update_image generates a name when none is supplied."""
        image_id = "image_id"
        min_disk = 0
        min_ram = 0
        # With image_name=None the service falls back to name_generator.
        expected_image_name = image_name or self.name_generator.return_value
        image = self.service.update_image(image_id=image_id,
                                          image_name=image_name,
                                          min_disk=min_disk,
                                          min_ram=min_ram)
        self.assertEqual(self.gc.images.update.return_value, image)
        self.gc.images.update.assert_called_once_with(image_id,
                                                      name=expected_image_name,
                                                      min_disk=min_disk,
                                                      min_ram=min_ram)

    @ddt.data({"status": "activate", "is_public": True, "owner": "owner"},
              {"status": "activate", "is_public": False, "owner": "owner"},
              {"status": "activate", "is_public": None, "owner": "owner"})
    @ddt.unpack
    def test_list_images(self, status, is_public, owner):
        """list_images passes the filters straight through to glance."""
        self.service.list_images(is_public=is_public, status=status,
                                 owner=owner)
        self.gc.images.list.assert_called_once_with(status=status,
                                                    owner=owner,
                                                    is_public=is_public)

    def test_set_visibility(self):
        """set_visibility defaults to is_public=True on the v1 API."""
        image_id = "image_id"
        is_public = True
        self.service.set_visibility(image_id=image_id)
        self.gc.images.update.assert_called_once_with(
            image_id, is_public=is_public)
@ddt.ddt
class UnifiedGlanceV1ServiceTestCase(test.TestCase):
    """Checks delegation from the unified glance facade to the v1 impl."""

    def setUp(self):
        super(UnifiedGlanceV1ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.service = glance_v1.UnifiedGlanceV1Service(self.clients)
        self.service._impl = mock.create_autospec(self.service._impl)

    @ddt.data({"visibility": "public"},
              {"visibility": "private"})
    @ddt.unpack
    @mock.patch(PATH)
    def test_create_image(self, mock_image__unify_image, visibility):
        """create_image translates visibility into v1's is_public flag."""
        properties = {"fakeprop": "fake"}
        result = self.service.create_image(image_name="image_name",
                                           container_format="container_format",
                                           image_location="image_location",
                                           disk_format="disk_format",
                                           visibility=visibility,
                                           properties=properties)
        self.service._impl.create_image.assert_called_once_with(
            image_name="image_name",
            container_format="container_format",
            image_location="image_location",
            disk_format="disk_format",
            is_public=(visibility == "public"),
            min_disk=0,
            min_ram=0,
            properties=properties)
        self.assertEqual(mock_image__unify_image.return_value, result)

    @mock.patch(PATH)
    def test_update_image(self, mock_image__unify_image):
        """update_image delegates with default min_disk/min_ram of 0."""
        result = self.service.update_image("image_id",
                                           image_name="image_name")
        self.service._impl.update_image.assert_called_once_with(
            image_id="image_id",
            image_name="image_name",
            min_disk=0,
            min_ram=0)
        self.assertEqual(mock_image__unify_image.return_value, result)

    @mock.patch(PATH)
    def test_list_images(self, mock_image__unify_image):
        """Every image returned by the impl is unified before returning."""
        self.service._impl.list_images.return_value = [mock.MagicMock()]
        result = self.service.list_images("active", visibility="public")
        self.assertEqual([mock_image__unify_image.return_value], result)
        self.service._impl.list_images.assert_called_once_with(
            status="active",
            is_public=True)

    def test_set_visibility(self):
        """Visibility "private" is mapped onto is_public=False."""
        self.service.set_visibility(image_id="image_id", visibility="private")
        self.service._impl.set_visibility.assert_called_once_with(
            image_id="image_id", is_public=False)

    def test_set_visibility_failure(self):
        """An unknown visibility value raises VisibilityException."""
        self.assertRaises(image.VisibilityException,
                          self.service.set_visibility,
                          image_id="image_id",
                          visibility="error")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,680
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/heat/stacks.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import atomic
from rally.task import types
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.heat import utils
"""Scenarios for Heat stacks."""
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_and_list_stack",
                    platform="openstack")
class CreateAndListStack(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create a stack and then list all stacks.

        Measure the "heat stack-create" and "heat stack-list" commands
        performance.

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(template_path, parameters,
                                     files, environment)
        self.assertTrue(created)
        listed = self._list_stacks()
        # The freshly created stack must show up in the listing.
        self.assertIn(created.id, [s.id for s in listed])
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="HeatStacks.list_stacks_and_resources",
                    platform="openstack")
class ListStacksAndResources(utils.HeatScenario):

    def run(self):
        """List all resources from tenant stacks."""
        for stack in self._list_stacks():
            # Time each per-stack resource listing as its own atomic action.
            with atomic.ActionTimer(self, "heat.list_resources"):
                self.clients("heat").resources.list(stack.id)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_and_delete_stack",
                    platform="openstack")
class CreateAndDeleteStack(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create and then delete a stack.

        Measure the "heat stack-create" and "heat stack-delete" commands
        performance.

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(template_path, parameters,
                                     files, environment)
        self._delete_stack(created)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_check_delete_stack",
                    platform="openstack")
class CreateCheckDeleteStack(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create, check and delete a stack.

        Measure the performance of the following commands:

        - heat stack-create
        - heat action-check
        - heat stack-delete

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(template_path, parameters,
                                     files, environment)
        self._check_stack(created)
        self._delete_stack(created)
@types.convert(template_path={"type": "file"},
               updated_template_path={"type": "file"},
               files={"type": "file_dict"},
               updated_files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_update_delete_stack",
                    platform="openstack")
class CreateUpdateDeleteStack(utils.HeatScenario):

    def run(self, template_path, updated_template_path,
            parameters=None, updated_parameters=None,
            files=None, updated_files=None,
            environment=None, updated_environment=None):
        """Create, update and then delete a stack.

        Measure the "heat stack-create", "heat stack-update"
        and "heat stack-delete" commands performance.

        :param template_path: path to stack template file
        :param updated_template_path: path to updated stack template file
        :param parameters: parameters to use in heat template
        :param updated_parameters: parameters to use in updated heat template
                                   If not specified then parameters will be
                                   used instead
        :param files: files used in template
        :param updated_files: files used in updated template. If not specified
                              files value will be used instead
        :param environment: stack environment definition
        :param updated_environment: environment definition for updated stack
        """
        created = self._create_stack(template_path, parameters,
                                     files, environment)
        # Each "updated_*" argument falls back to its original counterpart.
        self._update_stack(created, updated_template_path,
                           updated_parameters or parameters,
                           updated_files or files,
                           updated_environment or environment)
        self._delete_stack(created)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_stack_and_scale",
                    platform="openstack")
class CreateStackAndScale(utils.HeatScenario):

    def run(self, template_path, output_key, delta,
            parameters=None, files=None,
            environment=None):
        """Create an autoscaling stack and invoke a scaling policy.

        Measure the performance of autoscaling webhooks.

        :param template_path: path to template file that includes an
                              OS::Heat::AutoScalingGroup resource
        :param output_key: the stack output key that corresponds to
                           the scaling webhook
        :param delta: the number of instances the stack is expected to
                      change by.
        :param parameters: parameters to use in heat template
        :param files: files used in template (dict of file name to
                      file path)
        :param environment: stack environment definition (dict)
        """
        # TODO(stpierre): Kilo Heat is *much* better than Juno for the
        # requirements of this scenario, so once Juno goes out of
        # support we should update this scenario to suck less. Namely:
        #
        # * Kilo Heat can supply alarm_url attributes without needing
        #   an output key, so instead of getting the output key from
        #   the user, just get the name of the ScalingPolicy to apply.
        # * Kilo Heat changes the status of a stack while scaling it,
        #   so _scale_stack() can check for the stack to have changed
        #   size and for it to be in UPDATE_COMPLETE state, so the
        #   user no longer needs to specify the expected delta.
        created = self._create_stack(template_path, parameters, files,
                                     environment)
        self._scale_stack(created, output_key, delta)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_suspend_resume_delete_stack",
                    platform="openstack")
class CreateSuspendResumeDeleteStack(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create, suspend-resume and then delete a stack.

        Measure performance of the following commands:

        * heat stack-create
        * heat action-suspend
        * heat action-resume
        * heat stack-delete

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        stack = self._create_stack(template_path, parameters,
                                   files, environment)
        self._suspend_stack(stack)
        self._resume_stack(stack)
        self._delete_stack(stack)
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="HeatStacks.list_stacks_and_events",
                    platform="openstack")
class ListStacksAndEvents(utils.HeatScenario):

    def run(self):
        """List events from tenant stacks."""
        for stack in self._list_stacks():
            # Time each per-stack event listing as its own atomic action.
            with atomic.ActionTimer(self, "heat.list_events"):
                self.clients("heat").events.list(stack.id)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("validate_heat_template", params="template_path")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_snapshot_restore_delete_stack",
                    platform="openstack")
class CreateSnapshotRestoreDeleteStack(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create, snapshot-restore and then delete a stack.

        Measure performance of the following commands:

        * heat stack-create
        * heat stack-snapshot
        * heat stack-restore
        * heat stack-delete

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(
            template_path, parameters, files, environment)
        snapshot = self._snapshot_stack(created)
        self._restore_stack(created, snapshot["id"])
        self._delete_stack(created)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_stack_and_show_output_via_API",
                    platform="openstack")
class CreateStackAndShowOutputViaAPI(utils.HeatScenario):

    def run(self, template_path, output_key,
            parameters=None, files=None, environment=None):
        """Create stack and show output by using old algorithm.

        Measure performance of the following commands:

        * heat stack-create
        * heat output-show

        :param template_path: path to stack template file
        :param output_key: the stack output key to show
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(
            template_path, parameters, files, environment)
        self._stack_show_output_via_API(created, output_key)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_stack_and_show_output",
                    platform="openstack")
class CreateStackAndShowOutput(utils.HeatScenario):

    def run(self, template_path, output_key,
            parameters=None, files=None, environment=None):
        """Create stack and show output by using new algorithm.

        Measure performance of the following commands:

        * heat stack-create
        * heat output-show

        :param template_path: path to stack template file
        :param output_key: the stack output key to show
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(
            template_path, parameters, files, environment)
        self._stack_show_output(created, output_key)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_stack_and_list_output_via_API",
                    platform="openstack")
class CreateStackAndListOutputViaAPI(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create stack and list outputs by using old algorithm.

        Measure performance of the following commands:

        * heat stack-create
        * heat output-list

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(
            template_path, parameters, files, environment)
        self._stack_list_output_via_API(created)
@types.convert(template_path={"type": "file"}, files={"type": "file_dict"})
@validation.add("required_services", services=[consts.Service.HEAT])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["heat"]},
                    name="HeatStacks.create_stack_and_list_output",
                    platform="openstack")
class CreateStackAndListOutput(utils.HeatScenario):

    def run(self, template_path, parameters=None,
            files=None, environment=None):
        """Create stack and list outputs by using new algorithm.

        Measure performance of the following commands:

        * heat stack-create
        * heat output-list

        :param template_path: path to stack template file
        :param parameters: parameters to use in heat template
        :param files: files used in template
        :param environment: stack environment definition
        """
        created = self._create_stack(
            template_path, parameters, files, environment)
        self._stack_list_output(created)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,681
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/network/neutron.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools

from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import service

from rally_openstack.common import consts
from rally_openstack.common.services.network import net_utils
CONF = cfg.CONF
# Module-level logger, following rally's per-module logger convention.
LOG = logging.getLogger(__name__)
def _args_adapter(arguments_map):
def wrapper(func):
def decorator(*args, **kwargs):
for source, dest in arguments_map.items():
if source in kwargs:
if dest in kwargs:
raise TypeError(
f"{func.__name__}() accepts either {dest} keyword "
f"argument or {source} but both were specified.")
kwargs[dest] = kwargs.pop(source)
return func(*args, **kwargs)
return decorator
return wrapper
# Maps neutron's colon-separated network attribute names to the pythonic
# parameter names declared by NeutronService.create_network.
_NETWORK_ARGS_MAP = {
    "provider:network_type": "provider_network_type",
    "provider:physical_network": "provider_physical_network",
    "provider:segmentation_id": "provider_segmentation_id",
    "router:external": "router_external"
}
def _create_network_arg_adapter():
    """A decorator for converting neutron's create kwargs to look pythonic."""
    return _args_adapter(_NETWORK_ARGS_MAP)
class _NoneObj(object):
def __len__(self):
return 0
_NONE = _NoneObj()
def _clean_dict(**kwargs):
"""Builds a dict object from keyword arguments ignoring nullable values."""
return dict((k, v) for k, v in kwargs.items() if v != _NONE)
@service.service(service_name="neutron", service_type="network", version="2.0")
class NeutronService(service.Service):
"""A helper class for Neutron API"""
    def __init__(self, *args, **kwargs):
        super(NeutronService, self).__init__(*args, **kwargs)
        # Both caches are populated lazily: the extensions list on first
        # lookup, the client on first access of the ``client`` property.
        self._cached_supported_extensions = None
        self._client = None
@property
def client(self):
if self._client is None:
self._client = self._clients.neutron()
return self._client
def create_network_topology(
self, network_create_args=None,
router_create_args=None, router_per_subnet=False,
subnet_create_args=None, subnets_count=1, subnets_dualstack=False
):
"""Create net infrastructure(network, router, subnets).
:param network_create_args: A dict with creation arguments for a
network. The format is equal to the create_network method
:param router_create_args: A dict with creation arguments for an
external router that will add an interface to each created subnet.
The format is equal to the create_subnet method
In case of None value (default behaviour), no router is created.
:param router_per_subnet: whether or not to create router per subnet
or use one router for all subnets.
:param subnet_create_args: A dict with creation arguments for
subnets. The format is equal to the create_subnet method.
:param subnets_count: Number of subnets to create per network.
Defaults to 1
:param subnets_dualstack: Whether subnets should be of both IPv4 and
IPv6 (i.e first subnet will be created for IPv4, the second for
IPv6, the third for IPv4,..). If subnet_create_args includes one of
('cidr', 'start_cidr', 'ip_version') keys, subnets_dualstack
parameter will be ignored.
"""
subnet_create_args = dict(subnet_create_args or {})
network = self.create_network(**(network_create_args or {}))
subnet_create_args["network_id"] = network["id"]
routers = []
if router_create_args is not None:
for i in range(subnets_count if router_per_subnet else 1):
routers.append(self.create_router(**router_create_args))
subnets = []
ip_versions = itertools.cycle([4, 6] if subnets_dualstack else [4])
use_subnets_dualstack = (
"cidr" not in subnet_create_args
and "start_cidr" not in subnet_create_args
and "ip_version" not in subnet_create_args
)
for i in range(subnets_count):
if use_subnets_dualstack:
subnet_create_args["ip_version"] = next(ip_versions)
if routers:
if router_per_subnet:
router = routers[i]
else:
router = routers[0]
subnet_create_args["router_id"] = router["id"]
subnets.append(self.create_subnet(**subnet_create_args))
network["subnets"] = [s["id"] for s in subnets]
return {
"network": network,
"subnets": subnets,
"routers": routers
}
    def delete_network_topology(self, topo):
        """Delete network topology

        This method was developed to provide a backward compatibility with old
        neutron helpers. It is not recommended way and we suggest to use
        cleanup manager instead.

        :param topo: Network topology as create_network_topology returned
        """
        # Detach router gateways first, before any deletions happen.
        for router in topo["routers"]:
            self.remove_gateway_from_router(router["id"])
        network_id = topo["network"]["id"]
        # Ports and subnets are removed before the network itself.
        # NOTE(review): delete_port is handed the whole port dict, unlike the
        # id-based calls around it — confirm against delete_port's signature.
        for port in self.list_ports(network_id=network_id):
            self.delete_port(port)
        for subnet in self.list_subnets(network_id=network_id):
            self.delete_subnet(subnet["id"])
        self.delete_network(network_id)
        # Routers are deleted last, after their interfaces are gone.
        for router in topo["routers"]:
            self.delete_router(router["id"])
    @atomic.action_timer("neutron.create_network")
    # NOTE(review): _create_network_arg_adapter is defined elsewhere in this
    # module; presumably it adapts alternate/legacy argument spellings to
    # this signature -- confirm against its definition.
    @_create_network_arg_adapter()
    def create_network(self,
                       project_id=_NONE,
                       admin_state_up=_NONE,
                       dns_domain=_NONE,
                       mtu=_NONE,
                       port_security_enabled=_NONE,
                       provider_network_type=_NONE,
                       provider_physical_network=_NONE,
                       provider_segmentation_id=_NONE,
                       qos_policy_id=_NONE,
                       router_external=_NONE,
                       segments=_NONE,
                       shared=_NONE,
                       vlan_transparent=_NONE,
                       description=_NONE,
                       availability_zone_hints=_NONE):
        """Create neutron network.

        :param project_id: The ID of the project that owns the resource. Only
            administrative and users with advsvc role can specify a project ID
            other than their own. You cannot change this value through
            authorization policies.
        :param admin_state_up: The administrative state of the network,
            which is up (true) or down (false).
        :param dns_domain: A valid DNS domain.
        :param mtu: The maximum transmission unit (MTU) value to address
            fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
        :param port_security_enabled: The port security status of the network.
            Valid values are enabled (true) and disabled (false). This value is
            used as the default value of port_security_enabled field of a
            newly created port.
        :param provider_network_type: The type of physical network that this
            network should be mapped to. For example, flat, vlan, vxlan,
            or gre. Valid values depend on a networking back-end.
        :param provider_physical_network: The physical network where this
            network should be implemented. The Networking API v2.0 does not
            provide a way to list available physical networks.
            For example, the Open vSwitch plug-in configuration file defines
            a symbolic name that maps to specific bridges on each compute host.
        :param provider_segmentation_id: The ID of the isolated segment on the
            physical network. The network_type attribute defines the
            segmentation model. For example, if the network_type value is vlan,
            this ID is a vlan identifier. If the network_type value is gre,
            this ID is a gre key.
        :param qos_policy_id: The ID of the QoS policy associated with the
            network.
        :param router_external: Indicates whether the network has an external
            routing facility that’s not managed by the networking service.
        :param segments: A list of provider segment objects.
        :param shared: Indicates whether this resource is shared across all
            projects. By default, only administrative users can change
            this value.
        :param vlan_transparent: Indicates the VLAN transparency mode of the
            network, which is VLAN transparent (true) or not VLAN
            transparent (false).
        :param description: A human-readable description for the resource.
            Default is an empty string.
        :param availability_zone_hints: The availability zone candidate for
            the network.
        :returns: neutron network dict
        """
        # _clean_dict drops arguments left at the _NONE sentinel, so the
        # request body only carries explicitly-set attributes.
        body = _clean_dict(
            # the name is always generated by rally so cleanup can find it
            name=self.generate_random_name(),
            tenant_id=project_id,
            admin_state_up=admin_state_up,
            dns_domain=dns_domain,
            mtu=mtu,
            port_security_enabled=port_security_enabled,
            qos_policy_id=qos_policy_id,
            segments=segments,
            shared=shared,
            vlan_transparent=vlan_transparent,
            description=description,
            availability_zone_hints=availability_zone_hints,
            **{
                # These API attribute names contain colons and cannot be
                # written as Python keyword arguments directly.
                "provider:network_type": provider_network_type,
                "provider:physical_network": provider_physical_network,
                "provider:segmentation_id": provider_segmentation_id,
                "router:external": router_external
            }
        )
        resp = self.client.create_network({"network": body})
        return resp["network"]
@atomic.action_timer("neutron.show_network")
def get_network(self, network_id, fields=_NONE):
"""Get network by ID
:param network_id: Network ID to fetch data for
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_network(network_id, **body)
return resp["network"]
def find_network(self, network_id_or_name, external=_NONE):
"""Find network by identifier (id or name)
:param network_id_or_name: Network ID or name
:param external: check target network is external or not
"""
network = None
for net in self.list_networks():
if network_id_or_name in (net["name"], net["id"]):
network = net
break
if network is None:
raise exceptions.GetResourceFailure(
resource="network",
err=f"no name or id matches {network_id_or_name}")
if external:
if not network.get("router:external", False):
raise exceptions.NotFoundException(
f"Network '{network['name']} (id={network['id']})' is not "
f"external.")
return network
    @atomic.action_timer("neutron.update_network")
    # NOTE(review): same argument adapter as create_network -- presumably
    # maps alternate argument spellings; confirm against its definition.
    @_create_network_arg_adapter()
    def update_network(self,
                       network_id,
                       name=_NONE,
                       admin_state_up=_NONE,
                       dns_domain=_NONE,
                       mtu=_NONE,
                       port_security_enabled=_NONE,
                       provider_network_type=_NONE,
                       provider_physical_network=_NONE,
                       provider_segmentation_id=_NONE,
                       qos_policy_id=_NONE,
                       router_external=_NONE,
                       segments=_NONE,
                       shared=_NONE,
                       description=_NONE,
                       is_default=_NONE):
        """Update neutron network.

        :param network_id: ID of the network to update
        :param name: Human-readable name of the network.
        :param admin_state_up: The administrative state of the network,
            which is up (true) or down (false).
        :param dns_domain: A valid DNS domain.
        :param mtu: The maximum transmission unit (MTU) value to address
            fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6.
        :param port_security_enabled: The port security status of the network.
            Valid values are enabled (true) and disabled (false). This value is
            used as the default value of port_security_enabled field of a
            newly created port.
        :param provider_network_type: The type of physical network that this
            network should be mapped to. For example, flat, vlan, vxlan,
            or gre. Valid values depend on a networking back-end.
        :param provider_physical_network: The physical network where this
            network should be implemented. The Networking API v2.0 does not
            provide a way to list available physical networks.
            For example, the Open vSwitch plug-in configuration file defines
            a symbolic name that maps to specific bridges on each compute host.
        :param provider_segmentation_id: The ID of the isolated segment on the
            physical network. The network_type attribute defines the
            segmentation model. For example, if the network_type value is vlan,
            this ID is a vlan identifier. If the network_type value is gre,
            this ID is a gre key.
        :param qos_policy_id: The ID of the QoS policy associated with the
            network.
        :param router_external: Indicates whether the network has an external
            routing facility that’s not managed by the networking service.
        :param segments: A list of provider segment objects.
        :param shared: Indicates whether this resource is shared across all
            projects. By default, only administrative users can change
            this value.
        :param description: A human-readable description for the resource.
            Default is an empty string.
        :param is_default: The network is default or not.
        :returns: neutron network dict
        :raises TypeError: if no attribute to update was provided
        """
        # Only attributes that were explicitly set survive _clean_dict.
        body = _clean_dict(
            name=name,
            admin_state_up=admin_state_up,
            dns_domain=dns_domain,
            mtu=mtu,
            port_security_enabled=port_security_enabled,
            qos_policy_id=qos_policy_id,
            segments=segments,
            shared=shared,
            description=description,
            is_default=is_default,
            **{
                # colon-separated API attribute names cannot be keywords
                "provider:network_type": provider_network_type,
                "provider:physical_network": provider_physical_network,
                "provider:segmentation_id": provider_segmentation_id,
                "router:external": router_external
            }
        )
        if not body:
            # An empty update is almost certainly a caller bug.
            raise TypeError("No updates for a network.")
        resp = self.client.update_network(network_id, {"network": body})
        return resp["network"]
@atomic.action_timer("neutron.delete_network")
def delete_network(self, network_id):
"""Delete network
:param network_id: Network ID
"""
self.client.delete_network(network_id)
@atomic.action_timer("neutron.list_networks")
def list_networks(self, name=_NONE, router_external=_NONE, status=_NONE,
**kwargs):
"""List networks.
:param name: Filter the list result by the human-readable name of the
resource.
:param router_external: Filter the network list result based on whether
the network has an external routing facility that’s not managed by
the networking service.
:param status: Filter the network list result by network status.
Values are ACTIVE, DOWN, BUILD or ERROR.
:param kwargs: additional network list filters
"""
kwargs["router:external"] = router_external
filters = _clean_dict(name=name, status=status, **kwargs)
return self.client.list_networks(**filters)["networks"]
IPv4_DEFAULT_DNS_NAMESERVERS = ["8.8.8.8", "8.8.4.4"]
IPv6_DEFAULT_DNS_NAMESERVERS = ["dead:beaf::1", "dead:beaf::2"]
@atomic.action_timer("neutron.create_subnet")
def create_subnet(self, network_id, router_id=_NONE, project_id=_NONE,
enable_dhcp=_NONE,
dns_nameservers=_NONE, allocation_pools=_NONE,
host_routes=_NONE, ip_version=_NONE, gateway_ip=_NONE,
cidr=_NONE, start_cidr=_NONE, prefixlen=_NONE,
ipv6_address_mode=_NONE, ipv6_ra_mode=_NONE,
segment_id=_NONE, subnetpool_id=_NONE,
use_default_subnetpool=_NONE, service_types=_NONE,
dns_publish_fixed_ip=_NONE):
"""Create neutron subnet.
:param network_id: The ID of the network to which the subnet belongs.
:param router_id: An external router and add as an interface to subnet.
:param project_id: The ID of the project that owns the resource.
Only administrative and users with advsvc role can specify a
project ID other than their own. You cannot change this value
through authorization policies.
:param enable_dhcp: Indicates whether dhcp is enabled or disabled for
the subnet. Default is true.
:param dns_nameservers: List of dns name servers associated with the
subnet. Default is a list of Google DNS
:param allocation_pools: Allocation pools with start and end IP
addresses for this subnet. If allocation_pools are not specified,
OpenStack Networking automatically allocates pools for covering
all IP addresses in the CIDR, excluding the address reserved for
the subnet gateway by default.
:param host_routes: Additional routes for the subnet. A list of
dictionaries with destination and nexthop parameters. Default
value is an empty list.
:param gateway_ip: Gateway IP of this subnet. If the value is null that
implies no gateway is associated with the subnet. If the gateway_ip
is not specified, OpenStack Networking allocates an address from
the CIDR for the gateway for the subnet by default.
:param ip_version: The IP protocol version. Value is 4 or 6. If CIDR
is specified, the value automatically can be detected from it,
otherwise defaults to 4.
Also, check start_cidr param description.
:param cidr: The CIDR of the subnet. If not specified, it will be
auto-generated based on start_cidr and ip_version parameters.
:param start_cidr:
:param prefixlen: he prefix length to use for subnet allocation from a
subnet pool. If not specified, the default_prefixlen value of the
subnet pool will be used.
:param ipv6_address_mode: The IPv6 address modes specifies mechanisms
for assigning IP addresses. Value is slaac, dhcpv6-stateful,
dhcpv6-stateless.
:param ipv6_ra_mode: The IPv6 router advertisement specifies whether
the networking service should transmit ICMPv6 packets, for a
subnet. Value is slaac, dhcpv6-stateful, dhcpv6-stateless.
:param segment_id: The ID of a network segment the subnet is
associated with. It is available when segment extension is enabled.
:param subnetpool_id: The ID of the subnet pool associated with the
subnet.
:param use_default_subnetpool: Whether to allocate this subnet from
the default subnet pool.
:param service_types: The service types associated with the subnet.
:param dns_publish_fixed_ip: Whether to publish DNS records for IPs
from this subnet. Default is false.
"""
if cidr == _NONE:
ip_version, cidr = net_utils.generate_cidr(
ip_version=ip_version, start_cidr=(start_cidr or None))
if ip_version == _NONE:
ip_version = net_utils.get_ip_version(cidr)
if dns_nameservers == _NONE:
if ip_version == 4:
dns_nameservers = self.IPv4_DEFAULT_DNS_NAMESERVERS
else:
dns_nameservers = self.IPv6_DEFAULT_DNS_NAMESERVERS
body = _clean_dict(
name=self.generate_random_name(),
network_id=network_id,
tenant_id=project_id,
enable_dhcp=enable_dhcp,
dns_nameservers=dns_nameservers,
allocation_pools=allocation_pools,
host_routes=host_routes,
ip_version=ip_version,
gateway_ip=gateway_ip,
cidr=cidr,
prefixlen=prefixlen,
ipv6_address_mode=ipv6_address_mode,
ipv6_ra_mode=ipv6_ra_mode,
segment_id=segment_id,
subnetpool_id=subnetpool_id,
use_default_subnetpool=use_default_subnetpool,
service_types=service_types,
dns_publish_fixed_ip=dns_publish_fixed_ip
)
subnet = self.client.create_subnet({"subnet": body})["subnet"]
if router_id:
self.add_interface_to_router(router_id=router_id,
subnet_id=subnet["id"])
return subnet
@atomic.action_timer("neutron.show_subnet")
def get_subnet(self, subnet_id):
"""Get subnet
:param subnet_id: Subnet ID
"""
return self.client.show_subnet(subnet_id)["subnet"]
    @atomic.action_timer("neutron.update_subnet")
    def update_subnet(self, subnet_id, name=_NONE, enable_dhcp=_NONE,
                      dns_nameservers=_NONE, allocation_pools=_NONE,
                      host_routes=_NONE, gateway_ip=_NONE, description=_NONE,
                      service_types=_NONE, segment_id=_NONE,
                      dns_publish_fixed_ip=_NONE):
        """Update neutron subnet.

        :param subnet_id: The ID of the subnet to update.
        :param name: Human-readable name of the resource.
        :param description: A human-readable description for the resource.
            Default is an empty string.
        :param enable_dhcp: Indicates whether dhcp is enabled or disabled for
            the subnet. Default is true.
        :param dns_nameservers: List of dns name servers associated with the
            subnet. Default is a list of Google DNS
        :param allocation_pools: Allocation pools with start and end IP
            addresses for this subnet. If allocation_pools are not specified,
            OpenStack Networking automatically allocates pools for covering
            all IP addresses in the CIDR, excluding the address reserved for
            the subnet gateway by default.
        :param host_routes: Additional routes for the subnet. A list of
            dictionaries with destination and nexthop parameters. Default
            value is an empty list.
        :param gateway_ip: Gateway IP of this subnet. If the value is null that
            implies no gateway is associated with the subnet. If the gateway_ip
            is not specified, OpenStack Networking allocates an address from
            the CIDR for the gateway for the subnet by default.
        :param segment_id: The ID of a network segment the subnet is
            associated with. It is available when segment extension is enabled.
        :param service_types: The service types associated with the subnet.
        :param dns_publish_fixed_ip: Whether to publish DNS records for IPs
            from this subnet. Default is false.
        :returns: updated neutron subnet dict
        :raises TypeError: if no attribute to update was provided
        """
        # Arguments left at the _NONE sentinel are stripped, so only
        # explicitly requested changes are sent to the API.
        body = _clean_dict(
            name=name,
            enable_dhcp=enable_dhcp,
            dns_nameservers=dns_nameservers,
            allocation_pools=allocation_pools,
            host_routes=host_routes,
            gateway_ip=gateway_ip,
            segment_id=segment_id,
            service_types=service_types,
            dns_publish_fixed_ip=dns_publish_fixed_ip,
            description=description
        )
        if not body:
            # An empty update is almost certainly a caller bug.
            raise TypeError("No updates for a subnet.")
        resp = self.client.update_subnet(subnet_id, {"subnet": body})["subnet"]
        return resp
@atomic.action_timer("neutron.delete_subnet")
def delete_subnet(self, subnet_id):
"""Delete subnet
:param subnet_id: Subnet ID
"""
self.client.delete_subnet(subnet_id)
@atomic.action_timer("neutron.list_subnets")
def list_subnets(self, network_id=_NONE, **filters):
"""List subnets.
:param network_id: Filter the subnet list result by the ID of the
network to which the subnet belongs.
:param filters: additional subnet list filters
"""
if network_id:
filters["network_id"] = network_id
return self.client.list_subnets(**filters)["subnets"]
    @atomic.action_timer("neutron.create_router")
    def create_router(self, project_id=_NONE, admin_state_up=_NONE,
                      description=_NONE, discover_external_gw=False,
                      external_gateway_info=_NONE, distributed=_NONE, ha=_NONE,
                      availability_zone_hints=_NONE, service_type_id=_NONE,
                      flavor_id=_NONE, enable_snat=_NONE):
        """Create router.

        :param project_id: The ID of the project that owns the resource. Only
            administrative and users with advsvc role can specify a project ID
            other than their own. You cannot change this value through
            authorization policies.
        :param admin_state_up: The administrative state of the resource, which
            is up (true) or down (false). Default is true.
        :param description: A human-readable description for the resource.
        :param discover_external_gw: Take one of available external networks
            and use it as external gateway. The parameter can not be used in
            combination of external_gateway_info parameter.
        :param external_gateway_info: The external gateway information of
            the router. If the router has an external gateway, this would be
            a dict with network_id, enable_snat and external_fixed_ips.
        :param distributed: true indicates a distributed router. It is
            available when dvr extension is enabled.
        :param ha: true indicates a highly-available router. It is available
            when l3-ha extension is enabled.
        :param availability_zone_hints: The availability zone candidates for
            the router. It is available when router_availability_zone extension
            is enabled.
        :param service_type_id: The ID of the service type associated with
            the router.
        :param flavor_id: The ID of the flavor associated with the router.
        :param enable_snat: Whether to include `enable_snat: True` to
            external_gateway_info or not. By default, it is enabled if a user
            is admin and "ext-gw-mode" extension presents
        """
        if external_gateway_info is _NONE and discover_external_gw:
            # Only the first external network is considered: the loop body
            # ends with an unconditional break.
            for external_network in self.list_networks(router_external=True):
                external_gateway_info = {"network_id": external_network["id"]}
                if enable_snat is _NONE:
                    # Default: enable SNAT only for admin users on clouds
                    # that expose the ext-gw-mode extension.
                    permission = self._clients.credential.permission
                    is_admin = permission == consts.EndpointPermission.ADMIN
                    if (self.supports_extension("ext-gw-mode", silent=True)
                            and is_admin):
                        external_gateway_info["enable_snat"] = True
                elif enable_snat:
                    external_gateway_info["enable_snat"] = True
                break
        # _clean_dict strips arguments left at the _NONE sentinel.
        body = _clean_dict(
            name=self.generate_random_name(),
            # tenant_id should work for both new and old neutron instances
            tenant_id=project_id,
            external_gateway_info=external_gateway_info,
            description=description,
            distributed=distributed,
            ha=ha,
            availability_zone_hints=availability_zone_hints,
            service_type_id=service_type_id,
            flavor_id=flavor_id,
            admin_state_up=admin_state_up
        )
        resp = self.client.create_router({"router": body})
        return resp["router"]
@atomic.action_timer("neutron.show_router")
def get_router(self, router_id, fields=_NONE):
"""Get router details
:param router_id: Router ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_router(router_id, **body)["router"]
@atomic.action_timer("neutron.add_interface_router")
def add_interface_to_router(self, router_id, subnet_id=_NONE,
port_id=_NONE):
"""Add interface to router.
:param router_id: The ID of the router.
:param subnet_id: The ID of the subnet. One of subnet_id or port_id
must be specified.
:param port_id: The ID of the port. One of subnet_id or port_id must
be specified.
"""
if (subnet_id and port_id) or (not subnet_id and not port_id):
raise TypeError("One of subnet_id or port_id must be specified "
"while adding interface to router.")
body = _clean_dict(subnet_id=subnet_id, port_id=port_id)
return self.client.add_interface_router(router_id, body)
    @atomic.action_timer("neutron.remove_interface_router")
    def remove_interface_from_router(self, router_id, subnet_id=_NONE,
                                     port_id=_NONE):
        """Remove interface from router

        :param router_id: The ID of the router.
        :param subnet_id: The ID of the subnet. One of subnet_id or port_id
            must be specified.
        :param port_id: The ID of the port. One of subnet_id or port_id must
            be specified.
        :raises TypeError: if neither or both of subnet_id/port_id are given
        """
        # NOTE(review): imported locally, matching the style of the other
        # methods in this file -- presumably to defer the neutronclient
        # dependency; confirm.
        from neutronclient.common import exceptions as neutron_exceptions

        if (subnet_id and port_id) or (not subnet_id and not port_id):
            raise TypeError("One of subnet_id or port_id must be specified "
                            "to remove interface from router.")

        body = _clean_dict(subnet_id=subnet_id, port_id=port_id)

        try:
            self.client.remove_interface_router(router_id, body)
        except (neutron_exceptions.BadRequest,
                neutron_exceptions.NotFound):
            # Some neutron plugins don't use router as
            # the device ID. Also, some plugin doesn't allow
            # to update the ha router interface as there is
            # an internal logic to update the interface/data model
            # instead.
            # Deliberately best-effort: the failure is logged, not raised.
            LOG.exception("Failed to remove an interface from a router.")
    @atomic.action_timer("neutron.add_gateway_router")
    def add_gateway_to_router(self, router_id, network_id, enable_snat=None,
                              external_fixed_ips=None):
        """Adds an external network gateway to the specified router.

        :param router_id: Router ID
        :param network_id: ID of the external network to use as the gateway
        :param enable_snat: whether SNAT should occur on the external gateway
            or not
        :param external_fixed_ips: Fixed IP specification(s) for the gateway
            port, passed through to the API unchanged.
        """
        gw_info = {"network_id": network_id}
        if enable_snat is not None:
            # enable_snat is only honoured on clouds exposing the
            # ext-gw-mode extension; otherwise it is silently dropped.
            if self.supports_extension("ext-gw-mode", silent=True):
                gw_info["enable_snat"] = enable_snat
        if external_fixed_ips is not None:
            gw_info["external_fixed_ips"] = external_fixed_ips
        self.client.add_gateway_router(router_id, gw_info)
@atomic.action_timer("neutron.remove_gateway_router")
def remove_gateway_from_router(self, router_id):
"""Removes an external network gateway from the specified router.
:param router_id: Router ID
"""
self.client.remove_gateway_router(router_id)
@atomic.action_timer("neutron.update_router")
def update_router(self, router_id, name=_NONE, admin_state_up=_NONE,
description=_NONE, external_gateway_info=_NONE,
distributed=_NONE, ha=_NONE):
"""Update router.
:param router_id: The ID of the router to update.
:param name: Human-readable name of the resource.
:param admin_state_up: The administrative state of the resource, which
is up (true) or down (false). Default is true.
:param description: A human-readable description for the resource.
:param external_gateway_info: The external gateway information of
the router. If the router has an external gateway, this would be
a dict with network_id, enable_snat and external_fixed_ips.
:param distributed: true indicates a distributed router. It is
available when dvr extension is enabled.
:param ha: true indicates a highly-available router. It is available
when l3-ha extension is enabled.
"""
body = _clean_dict(
name=name,
external_gateway_info=external_gateway_info,
description=description,
distributed=distributed,
ha=ha,
admin_state_up=admin_state_up
)
if not body:
raise TypeError("No updates for a router.")
return self.client.update_router(router_id, {"router": body})["router"]
@atomic.action_timer("neutron.delete_router")
def delete_router(self, router_id):
"""Delete router
:param router_id: Router ID
"""
self.client.delete_router(router_id)
@staticmethod
def _filter_routers(routers, subnet_ids):
for router in routers:
gtw_info = router["external_gateway_info"]
if gtw_info is None:
continue
if any(fixed_ip["subnet_id"] in subnet_ids
for fixed_ip in gtw_info["external_fixed_ips"]):
yield router
@atomic.action_timer("neutron.list_routers")
def list_routers(self, subnet_ids=_NONE, **kwargs):
"""List routers.
:param subnet_ids: Filter routers by attached subnet(s). Can be a
string or and an array with strings.
:param kwargs: additional router list filters
"""
routers = self.client.list_routers(**kwargs)["routers"]
if subnet_ids != _NONE:
routers = list(self._filter_routers(routers,
subnet_ids=subnet_ids))
return routers
@atomic.action_timer("neutron.create_port")
def create_port(self, network_id, **kwargs):
"""Create neutron port.
:param network_id: neutron network dict
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
kwargs["name"] = self.generate_random_name()
body = _clean_dict(
network_id=network_id,
**kwargs
)
return self.client.create_port({"port": body})["port"]
@atomic.action_timer("neutron.show_port")
def get_port(self, port_id, fields=_NONE):
"""Get port details
:param port_id: Port ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_port(port_id, **body)["port"]
@atomic.action_timer("neutron.update_port")
def update_port(self, port_id, **kwargs):
"""Update neutron port.
:param port_id: The ID of the port to update.
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
body = _clean_dict(**kwargs)
if not body:
raise TypeError("No updates for a port.")
return self.client.update_port(port_id, {"port": body})["port"]
ROUTER_INTERFACE_OWNERS = ("network:router_interface",
"network:router_interface_distributed",
"network:ha_router_replicated_interface")
ROUTER_GATEWAY_OWNER = "network:router_gateway"
    @atomic.action_timer("neutron.delete_port")
    def delete_port(self, port):
        """Delete port.

        :param port: Port ID or object
        :returns bool: False if neutron returns NotFound error on port delete
        """
        # NOTE(review): imported locally, matching the style of the other
        # methods in this file -- presumably to defer the neutronclient
        # dependency; confirm.
        from neutronclient.common import exceptions as neutron_exceptions

        if not isinstance(port, dict):
            # A bare ID was given: fake a port dict with a falsy
            # device_owner so the generic delete branch below is taken.
            port = {"id": port, "device_owner": False}

        if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS
                or port["device_owner"] == self.ROUTER_GATEWAY_OWNER):
            # Router-owned ports are detached via the router API rather
            # than deleted directly.
            if port["device_owner"] == self.ROUTER_GATEWAY_OWNER:
                self.remove_gateway_from_router(port["device_id"])
            self.remove_interface_from_router(
                router_id=port["device_id"], port_id=port["id"])
        else:
            try:
                self.client.delete_port(port["id"])
            except neutron_exceptions.PortNotFoundClient:
                # port is auto-removed
                return False
        return True
@atomic.action_timer("neutron.list_ports")
def list_ports(self, network_id=_NONE, device_id=_NONE, device_owner=_NONE,
status=_NONE, **kwargs):
"""List ports.
:param network_id: Filter the list result by the ID of the attached
network.
:param device_id: Filter the port list result by the ID of the device
that uses this port. For example, a server instance or a logical
router.
:param device_owner: Filter the port result list by the entity type
that uses this port. For example, compute:nova (server instance),
network:dhcp (DHCP agent) or network:router_interface
(router interface).
:param status: Filter the port list result by the port status.
Values are ACTIVE, DOWN, BUILD and ERROR.
:param kwargs: additional port list filters
"""
filters = _clean_dict(
network_id=network_id,
device_id=device_id,
device_owner=device_owner,
status=status,
**kwargs
)
return self.client.list_ports(**filters)["ports"]
    @atomic.action_timer("neutron.create_floating_ip")
    def create_floatingip(self, floating_network=None, project_id=_NONE,
                          fixed_ip_address=_NONE, floating_ip_address=_NONE,
                          port_id=_NONE, subnet_id=_NONE, dns_domain=_NONE,
                          dns_name=_NONE):
        """Create floating IP with floating_network.

        :param floating_network: external network associated with floating IP.
            Can be a network dict, a network name/ID, or None to pick the
            first external network found.
        :param project_id: The ID of the project.
        :param fixed_ip_address: The fixed IP address that is associated with
            the floating IP. If an internal port has multiple associated IP
            addresses, the service chooses the first IP address unless you
            explicitly define a fixed IP address in the fixed_ip_address
            parameter.
        :param floating_ip_address: The floating IP address. Default policy
            settings enable only administrative users to set floating IP
            addresses and some non-administrative users might require a
            floating IP address. If you do not specify a floating IP address
            in the request, the operation automatically allocates one.
        :param port_id: The ID of a port associated with the floating IP.
            To associate the floating IP with a fixed IP at creation time,
            you must specify the identifier of the internal port.
        :param subnet_id: The subnet ID on which you want to create the
            floating IP.
        :param dns_domain: A valid DNS domain.
        :param dns_name: A valid DNS name.
        """
        # NOTE(review): imported locally, matching the style of the other
        # methods in this file -- presumably to defer the neutronclient
        # dependency; confirm.
        from neutronclient.common import exceptions as neutron_exceptions

        # Resolve the external network to use for allocation.
        if isinstance(floating_network, dict):
            net_id = floating_network["id"]
        elif floating_network:
            net_id = self.find_network(floating_network, external=True)["id"]
        else:
            ext_networks = self.list_networks(router_external=True)
            if not ext_networks:
                raise exceptions.NotFoundException(
                    "Failed to allocate floating IP since no external "
                    "networks found.")
            net_id = ext_networks[0]["id"]

        # Only send a description on Newton+ clouds: older neutron rejects
        # the attribute (see the BadRequest handler below).
        description = _NONE
        api_info = self._clients.credential.api_info.get("neutron", {})
        if (not api_info.get("pre_newton", False)
                and not CONF.openstack.pre_newton_neutron):
            description = self.generate_random_name()

        # _clean_dict strips arguments left at the _NONE sentinel.
        body = _clean_dict(
            tenant_id=project_id,
            description=description,
            floating_network_id=net_id,
            fixed_ip_address=fixed_ip_address,
            floating_ip_address=floating_ip_address,
            port_id=port_id,
            subnet_id=subnet_id,
            dns_domain=dns_domain,
            dns_name=dns_name
        )

        try:
            resp = self.client.create_floatingip({"floatingip": body})
            return resp["floatingip"]
        except neutron_exceptions.BadRequest as e:
            error = "%s" % e
            if "Unrecognized attribute" in error and "'description'" in error:
                # Give the operator an actionable hint before re-raising.
                LOG.info("It looks like you have Neutron API of pre-Newton "
                         "OpenStack release. Setting "
                         "openstack.pre_newton_neutron option via Rally "
                         "configuration should fix an issue.")
            raise
@atomic.action_timer("neutron.show_floating_ip")
def get_floatingip(self, floatingip_id, fields=_NONE):
"""Get floating IP details
:param floatingip_id: Floating IP ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_floatingip(floatingip_id, **body)
return resp["floatingip"]
@atomic.action_timer("neutron.update_floating_ip")
def update_floatingip(self, floating_ip_id, fixed_ip_address=_NONE,
port_id=_NONE, description=_NONE):
"""Update floating IP.
:param floating_ip_id: The ID of the floating IP to update.
:param fixed_ip_address: The fixed IP address that is associated with
the floating IP. If an internal port has multiple associated IP
addresses, the service chooses the first IP address unless you
explicitly define a fixed IP address in the fixed_ip_address
parameter.
:param port_id: The ID of a port associated with the floating IP.
To associate the floating IP with a fixed IP at creation time,
you must specify the identifier of the internal port.
:param description: A human-readable description for the resource.
Default is an empty string.
"""
body = _clean_dict(
description=description,
fixed_ip_address=fixed_ip_address,
port_id=port_id
)
if not body:
raise TypeError("No updates for a floating ip.")
return self.client.update_floatingip(
floating_ip_id, {"floatingip": body})["floatingip"]
@atomic.action_timer("neutron.delete_floating_ip")
def delete_floatingip(self, floatingip_id):
"""Delete floating IP.
:param floatingip_id: floating IP id
"""
self.client.delete_floatingip(floatingip_id)
    @atomic.action_timer("neutron.associate_floating_ip")
    def associate_floatingip(self, port_id=None, device_id=None,
                             floatingip_id=None, floating_ip_address=None,
                             fixed_ip_address=None):
        """Add floating IP to an instance

        :param port_id: ID of the port to associate floating IP with
        :param device_id: ID of the device to find port to use
        :param floatingip_id: ID of the floating IP
        :param floating_ip_address: IP address to find floating IP to use
        :param fixed_ip_address: The fixed IP address to associate with the
            floating ip
        :raises TypeError: unless exactly one of port_id/device_id and
            exactly one of floatingip_id/floating_ip_address is given
        """
        if (device_id is None and port_id is None) or (device_id and port_id):
            raise TypeError("One of device_id or port_id must be specified.")

        if ((floating_ip_address is None and floatingip_id is None)
                or (floating_ip_address and floatingip_id)):
            raise TypeError("One of floating_ip_address or floatingip_id "
                            "must be specified.")

        if port_id is None:
            # Resolve the device to one of its ports; the first port
            # returned is used.
            ports = self.list_ports(device_id=device_id)
            if not ports:
                raise exceptions.GetResourceFailure(
                    resource="port",
                    err=f"device '{device_id}' have no ports associated.")
            port_id = ports[0]["id"]

        if floatingip_id is None:
            # Resolve the floating IP address to its ID via a server-side
            # filter; the first match is used.
            filtered_fips = self.list_floatingips(
                floating_ip_address=floating_ip_address)
            if not filtered_fips:
                raise exceptions.GetResourceFailure(
                    resource="floating ip",
                    err=f"There is no floating ip with '{floating_ip_address}'"
                        f" address.")

            floatingip_id = filtered_fips[0]["id"]

        additional = {}
        if fixed_ip_address:
            additional["fixed_ip_address"] = fixed_ip_address
        return self.update_floatingip(floatingip_id, port_id=port_id,
                                      **additional)
@atomic.action_timer("neutron.dissociate_floating_ip")
def dissociate_floatingip(self, floatingip_id=None,
floating_ip_address=None):
"""Remove floating IP from an instance
:param floatingip_id: ID of the floating IP
:param floating_ip_address: IP address to find floating IP to use
"""
if ((floating_ip_address is None and floatingip_id is None)
or (floating_ip_address and floatingip_id)):
raise TypeError("One of floating_ip_address or floatingip_id "
"must be specified.")
if floatingip_id is None:
filtered_fips = self.list_floatingips(
floating_ip_address=floating_ip_address)
if not filtered_fips:
raise exceptions.GetResourceFailure(
resource="floating ip",
err=f"There is no floating ip with '{floating_ip_address}'"
f" address.")
floatingip_id = filtered_fips[0]["id"]
return self.update_floatingip(floatingip_id, port_id=None)
@atomic.action_timer("neutron.list_floating_ips")
def list_floatingips(self, router_id=_NONE, port_id=_NONE, status=_NONE,
description=_NONE, floating_network_id=_NONE,
floating_ip_address=_NONE, fixed_ip_address=_NONE,
**kwargs):
"""List floating IPs.
:param router_id: Filter the floating IP list result by the ID of the
router for the floating IP.
:param port_id: Filter the floating IP list result by the ID of a port
associated with the floating IP.
:param status: Filter the floating IP list result by the status of the
floating IP. Values are ACTIVE, DOWN and ERROR.
:param description: Filter the list result by the human-readable
description of the resource. (available only for OpenStack Newton+)
:param floating_network_id: Filter the floating IP list result by the
ID of the network associated with the floating IP.
:param fixed_ip_address: Filter the floating IP list result by the
fixed IP address that is associated with the floating IP address.
:param floating_ip_address: Filter the floating IP list result by the
floating IP address.
:param kwargs: additional floating IP list filters
"""
filters = _clean_dict(
router_id=router_id,
port_id=port_id,
status=status,
description=description,
floating_network_id=floating_network_id,
floating_ip_address=floating_ip_address,
fixed_ip_address=fixed_ip_address,
**kwargs
)
resp = self.client.list_floatingips(**filters)
return resp["floatingips"]
@atomic.action_timer("neutron.create_security_group")
def create_security_group(self, name=None, project_id=_NONE,
description=_NONE, stateful=_NONE):
"""Create a security group
:param name: Human-readable name of the resource.
:param project_id: The ID of the project.
:param description: A human-readable description for the resource.
Default is an empty string.
:param stateful: Indicates if the security group is stateful or
stateless.
"""
body = _clean_dict(
name=name or self.generate_random_name(),
tenant_id=project_id,
description=description,
stateful=stateful
)
resp = self.client.create_security_group({"security_group": body})
return resp["security_group"]
@atomic.action_timer("neutron.show_security_group")
def get_security_group(self, security_group_id, fields=_NONE):
"""Get security group
:param security_group_id: Security group ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_security_group(security_group_id, **body)
return resp["security_group"]
@atomic.action_timer("neutron.update_security_group")
def update_security_group(self, security_group_id, name=_NONE,
description=_NONE, stateful=_NONE):
"""Update a security group
:param security_group_id: Security group ID
:param name: Human-readable name of the resource.
:param description: A human-readable description for the resource.
Default is an empty string.
:param stateful: Indicates if the security group is stateful or
stateless.
"""
body = _clean_dict(
name=name,
description=description,
stateful=stateful
)
if not body:
raise TypeError("No updates for a security group.")
resp = self.client.update_security_group(security_group_id,
{"security_group": body})
return resp["security_group"]
@atomic.action_timer("neutron.delete_security_group")
def delete_security_group(self, security_group_id):
"""Delete security group.
:param security_group_id: Security group ID
"""
return self.client.delete_security_group(security_group_id)
@atomic.action_timer("neutron.list_security_groups")
def list_security_groups(self, name=_NONE, **kwargs):
"""List security groups.
:param name: Filter the list result by the human-readable name of the
resource.
:param kwargs: additional security group list filters
"""
if name:
kwargs["name"] = name
resp = self.client.list_security_groups(**kwargs)
return resp["security_groups"]
@atomic.action_timer("neutron.create_security_group_rule")
def create_security_group_rule(self,
security_group_id,
direction="ingress",
protocol="tcp",
ethertype=_NONE,
port_range_min=_NONE,
port_range_max=_NONE,
remote_ip_prefix=_NONE,
description=_NONE):
"""Create security group rule.
:param security_group_id: The security group ID to associate with this
security group rule.
:param direction: Ingress or egress, which is the direction in which
the security group rule is applied.
:param protocol: The IP protocol can be represented by a string, an
integer, or null. Valid string or integer values are any or 0, ah
or 51, dccp or 33, egp or 8, esp or 50, gre or 47, icmp or 1,
icmpv6 or 58, igmp or 2, ipip or 4, ipv6-encap or 41,
ipv6-frag or 44, ipv6-icmp or 58, ipv6-nonxt or 59,
ipv6-opts or 60, ipv6-route or 43, ospf or 89, pgm or 113,
rsvp or 46, sctp or 132, tcp or 6, udp or 17, udplite or 136,
vrrp or 112. Additionally, any integer value between [0-255] is
also valid. The string any (or integer 0) means all IP protocols.
See the constants in neutron_lib.constants for the most
up-to-date list of supported strings.
:param ethertype: Must be IPv4 or IPv6, and addresses represented in
CIDR must match the ingress or egress rules.
:param port_range_min: The minimum port number in the range that is
matched by the security group rule. If the protocol is TCP, UDP,
DCCP, SCTP or UDP-Lite this value must be less than or equal to
the port_range_max attribute value. If the protocol is ICMP, this
value must be an ICMP type.
:param port_range_max: The maximum port number in the range that is
matched by the security group rule. If the protocol is TCP, UDP,
DCCP, SCTP or UDP-Lite this value must be greater than or equal to
the port_range_min attribute value. If the protocol is ICMP, this
value must be an ICMP code.
:param remote_ip_prefix: The remote IP prefix that is matched by this
security group rule.
:param description: A human-readable description for the resource.
Default is an empty string.
"""
body = _clean_dict(
security_group_id=security_group_id,
direction=direction,
protocol=protocol,
ethertype=ethertype,
port_range_min=port_range_min,
port_range_max=port_range_max,
remote_ip_prefix=remote_ip_prefix,
description=description
)
return self.client.create_security_group_rule(
{"security_group_rule": body})["security_group_rule"]
@atomic.action_timer("neutron.show_security_group_rule")
def get_security_group_rule(self, security_group_rule_id, verbose=_NONE,
fields=_NONE):
"""Get security group details
:param security_group_rule_id: Security group rule ID
:param verbose: Show detailed information.
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(verbose=verbose, fields=fields)
resp = self.client.show_security_group_rule(
security_group_rule_id, **body)
return resp["security_group_rule"]
@atomic.action_timer("neutron.delete_security_group_rule")
def delete_security_group_rule(self, security_group_rule_id):
"""Delete a given security group rule.
:param security_group_rule_id: Security group rule ID
"""
self.client.delete_security_group_rule(
security_group_rule_id)
@atomic.action_timer("neutron.list_security_group_rules")
def list_security_group_rules(
self, security_group_id=_NONE, protocol=_NONE, direction=_NONE,
port_range_min=_NONE, port_range_max=_NONE, description=_NONE,
**kwargs):
"""List all security group rules.
:param security_group_id: Filter the security group rule list result
by the ID of the security group that associates with this security
group rule.
:param protocol: Filter the security group rule list result by the IP
protocol.
:param direction: Filter the security group rule list result by the
direction in which the security group rule is applied, which is
ingress or egress.
:param port_range_min: Filter the security group rule list result by
the minimum port number in the range that is matched by the
security group rule.
:param port_range_max: Filter the security group rule list result by
the maximum port number in the range that is matched by the
security group rule.
:param description: Filter the list result by the human-readable
description of the resource.
:param kwargs: additional security group rule list filters
:return: list of security group rules
"""
filters = _clean_dict(
security_group_id=security_group_id,
protocol=protocol,
direction=direction,
port_range_min=port_range_min,
port_range_max=port_range_max,
description=description,
**kwargs
)
resp = self.client.list_security_group_rules(**filters)
return resp["security_group_rules"]
@atomic.action_timer("neutron.list_agents")
def list_agents(self, **kwargs):
"""Fetches agents.
:param kwargs: filters
:returns: user agents list
"""
return self.client.list_agents(**kwargs)["agents"]
@atomic.action_timer("neutron.list_extension")
def list_extensions(self):
"""List neutron extensions."""
return self.client.list_extensions()["extensions"]
@property
def cached_supported_extensions(self):
"""Return cached list of extension if exist or fetch it if is missed"""
if self._cached_supported_extensions is None:
self._cached_supported_extensions = self.list_extensions()
return self._cached_supported_extensions
def supports_extension(self, extension, silent=False):
"""Check whether a neutron extension is supported.
:param extension: Extension to check
:param silent: Return boolean result of the search instead of raising
an exception
"""
exist = any(ext.get("alias") == extension
for ext in self.cached_supported_extensions)
if not silent and not exist:
raise exceptions.NotFoundException(
message=f"Neutron driver does not support {extension}")
return exist
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,682
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/dataplane/heat.py
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pkgutil
from rally.common import utils as rutils
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.heat import utils as heat_utils
def get_data(filename_or_resource):
    """Load template data from a file path or a package resource.

    :param filename_or_resource: either a filesystem path (str) or a 2-item
        list ``[package, resource]`` forwarded to ``pkgutil.get_data``
    :returns: the content of the file (str) or of the package resource
        (bytes, as returned by ``pkgutil.get_data``)
    """
    if isinstance(filename_or_resource, list):
        return pkgutil.get_data(*filename_or_resource)
    # Use a context manager so the file handle is closed deterministically;
    # the previous `open(...).read()` leaked it until garbage collection.
    with open(filename_or_resource) as f:
        return f.read()
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="heat_dataplane", platform="openstack", order=435)
class HeatDataplane(context.OpenStackContext):
    """Context class for create stack by given template.

    This context will create stacks by given template for each tenant and
    add details to context. Following details will be added:

    * id of stack;
    * template file contents;
    * files dictionary;
    * stack parameters;

    Heat template should define a "gate" node which will interact with Rally
    by ssh and workload nodes by any protocol. To make this possible heat
    template should accept the following parameters:

    * network_id: id of public network
    * router_id: id of external router to connect "gate" node
    * key_name: name of nova ssh keypair to use for "gate" node
    """

    # Schema for a template/file given as a filesystem path.
    FILE_SCHEMA = {
        "description": "",
        "type": "string",
    }
    # Schema for a template/file given as a 2-item [package, resource] pair
    # resolved via pkgutil.get_data (see get_data() above).
    RESOURCE_SCHEMA = {
        "description": "",
        "type": "array",
        "minItems": 2,
        "maxItems": 2,
        "items": {"type": "string"}
    }
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "stacks_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "template": {
                "oneOf": [FILE_SCHEMA, RESOURCE_SCHEMA],
            },
            "files": {
                "type": "object",
                "additionalProperties": True
            },
            "parameters": {
                "type": "object",
                "additionalProperties": True
            },
            "context_parameters": {
                "type": "object",
                "additionalProperties": True
            },
        },
        "additionalProperties": False
    }
    DEFAULT_CONFIG = {
        "stacks_per_tenant": 1,
    }

    def _get_context_parameter(self, user, tenant_id, path):
        """Resolve a dotted lookup *path* against per-tenant context data.

        The root of the lookup exposes the given user under "user" and the
        tenant's context entry under "tenant".

        :param user: user dict made available as the "user" root key
        :param tenant_id: id of the tenant exposed as the "tenant" root key
        :param path: dot-separated path, e.g. "tenant.networks.0.id"
        :raises exceptions.RallyException: if any path segment is missing
        """
        value = {"user": user, "tenant": self.context["tenants"][tenant_id]}
        for key in path.split("."):
            try:
                # try to cast string to int in order to support integer keys
                # e.g 'spam.1.eggs' will be translated to ["spam"][1]["eggs"]
                key = int(key)
            except ValueError:
                pass
            try:
                value = value[key]
            except KeyError:
                raise exceptions.RallyException(
                    "There is no key %s in context" % path)
        return value

    def _get_public_network_id(self):
        """Return the id of the first external (public) neutron network."""
        nc = osclients.Clients(self.context["admin"]["credential"]).neutron()
        networks = nc.list_networks(**{"router:external": True})["networks"]
        # NOTE(review): assumes at least one external network exists;
        # an empty list would raise IndexError here.
        return networks[0]["id"]

    def setup(self):
        """Create the configured number of stacks for every tenant.

        For each tenant the stack template parameters are completed with
        network_id/router_id/key_name (unless explicitly provided) and any
        configured context_parameters, then ``stacks_per_tenant`` stacks are
        created and recorded under the tenant's "stack_dataplane" key.
        """
        template = get_data(self.config["template"])
        files = {}
        for key, filename in self.config.get("files", {}).items():
            files[key] = get_data(filename)
        parameters = self.config.get("parameters", rutils.LockedDict())
        with parameters.unlocked():
            if "network_id" not in parameters:
                parameters["network_id"] = self._get_public_network_id()
            for user, tenant_id in self._iterate_per_tenants():
                # Resolve user-defined context_parameters for this tenant.
                for name, path in self.config.get("context_parameters",
                                                  {}).items():
                    parameters[name] = self._get_context_parameter(user,
                                                                   tenant_id,
                                                                   path)
                if "router_id" not in parameters:
                    networks = self.context["tenants"][tenant_id]["networks"]
                    parameters["router_id"] = networks[0]["router_id"]
                if "key_name" not in parameters:
                    parameters["key_name"] = user["keypair"]["name"]
                heat_scenario = heat_utils.HeatScenario(
                    {"user": user, "task": self.context["task"],
                     "owner_id": self.context["owner_id"]})
                self.context["tenants"][tenant_id]["stack_dataplane"] = []
                for i in range(self.config["stacks_per_tenant"]):
                    stack = heat_scenario._create_stack(template, files=files,
                                                        parameters=parameters)
                    tenant_data = self.context["tenants"][tenant_id]
                    tenant_data["stack_dataplane"].append([stack.id, template,
                                                           files, parameters])

    def cleanup(self):
        """Delete all heat stacks created by this context."""
        resource_manager.cleanup(names=["heat.stacks"],
                                 users=self.context.get("users", []),
                                 superclass=heat_utils.HeatScenario,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,683
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/quotas/test_nova_quotas.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.quotas import nova_quotas
from tests.unit import test
class NovaQuotasTestCase(test.TestCase):
    """Unit tests for the NovaQuotas quota-management helper."""

    def setUp(self):
        super(NovaQuotasTestCase, self).setUp()
        # A representative full set of nova quota values reused by every test.
        self.quotas = {
            "instances": 10,
            "cores": 100,
            "ram": 100000,
            "floating_ips": 100,
            "fixed_ips": 10000,
            "metadata_items": 5,
            "injected_files": 5,
            "injected_file_content_bytes": 2048,
            "injected_file_path_bytes": 1024,
            "key_pairs": 50,
            "security_groups": 50,
            "security_group_rules": 50,
            "server_group_members": 777,
            "server_groups": 33
        }

    def test_update(self):
        """update() forwards the tenant id and quotas to the nova client."""
        clients = mock.MagicMock()
        nova_quo = nova_quotas.NovaQuotas(clients)
        tenant_id = mock.MagicMock()
        nova_quo.update(tenant_id, **self.quotas)
        clients.nova().quotas.update.assert_called_once_with(tenant_id,
                                                             **self.quotas)

    def test_delete(self):
        """delete() forwards the tenant id to the nova client."""
        clients = mock.MagicMock()
        nova_quo = nova_quotas.NovaQuotas(clients)
        tenant_id = mock.MagicMock()
        nova_quo.delete(tenant_id)
        clients.nova().quotas.delete.assert_called_once_with(tenant_id)

    def test_get(self):
        """get() converts the client's quota set into a plain dict."""
        tenant_id = "tenant_id"
        # MagicMock with quota attrs stands in for the nova QuotaSet object.
        quota_set = mock.MagicMock(**self.quotas)
        clients = mock.MagicMock()
        clients.nova.return_value.quotas.get.return_value = quota_set
        nova_quo = nova_quotas.NovaQuotas(clients)
        self.assertEqual(self.quotas, nova_quo.get(tenant_id))
        clients.nova().quotas.get.assert_called_once_with(tenant_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,684
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/nova/servers.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.common import validation
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.nova import utils as nova_utils
from rally_openstack.task import types
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="servers", platform="openstack", order=430)
class ServerGenerator(context.OpenStackContext):
    """Creates specified amount of Nova Servers per each tenant."""

    CONFIG_SCHEMA = {
        "type": "object",
        "properties": {
            "image": {
                "description": "Name of image to boot server(s) from.",
                "type": "object",
                "properties": {
                    "name": {"type": "string"}
                },
                "additionalProperties": False
            },
            "flavor": {
                "description": "Name of flavor to boot server(s) with.",
                "type": "object",
                "properties": {
                    "name": {"type": "string"}
                },
                "additionalProperties": False
            },
            "servers_per_tenant": {
                "description": "Number of servers to boot in each Tenant.",
                "type": "integer",
                "minimum": 1
            },
            "auto_assign_nic": {
                "description": "True if NICs should be assigned.",
                "type": "boolean",
            },
            "nics": {
                "type": "array",
                "description": "List of networks to attach to server.",
                "items": {"oneOf": [
                    {
                        "type": "object",
                        "properties": {"net-id": {"type": "string"}},
                        "description": "Network ID in a format like OpenStack "
                                       "API expects to see.",
                        "additionalProperties": False
                    },
                    {
                        "type": "string",
                        "description": "Network ID."
                    }
                ]},
                "minItems": 1
            }
        },
        "required": ["image", "flavor"],
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "servers_per_tenant": 5,
        "auto_assign_nic": False
    }

    def setup(self):
        """Boot ``servers_per_tenant`` servers for every tenant.

        Resolves the configured image and flavor names to ids, boots the
        servers through NovaScenario and stores the resulting server ids in
        each tenant's context entry under the "servers" key.
        """
        image = self.config["image"]
        flavor = self.config["flavor"]
        auto_nic = self.config["auto_assign_nic"]
        servers_per_tenant = self.config["servers_per_tenant"]
        kwargs = {}
        if self.config.get("nics"):
            if isinstance(self.config["nics"][0], dict):
                # it is a format that Nova API expects
                kwargs["nics"] = list(self.config["nics"])
            else:
                # plain network ids -> wrap them into the Nova API format
                kwargs["nics"] = [{"net-id": nic}
                                  for nic in self.config["nics"]]

        # Translate human-friendly image/flavor specs into concrete ids.
        image_id = types.GlanceImage(self.context).pre_process(
            resource_spec=image, config={})
        flavor_id = types.Flavor(self.context).pre_process(
            resource_spec=flavor, config={})

        for iter_, (user, tenant_id) in enumerate(self._iterate_per_tenants()):
            LOG.debug("Booting servers for user tenant %s" % user["tenant_id"])
            tmp_context = {"user": user,
                           "tenant": self.context["tenants"][tenant_id],
                           "task": self.context["task"],
                           "owner_id": self.context["owner_id"],
                           "iteration": iter_}
            nova_scenario = nova_utils.NovaScenario(tmp_context)

            LOG.debug("Calling _boot_servers with image_id=%(image_id)s "
                      "flavor_id=%(flavor_id)s "
                      "servers_per_tenant=%(servers_per_tenant)s"
                      % {"image_id": image_id,
                         "flavor_id": flavor_id,
                         "servers_per_tenant": servers_per_tenant})

            servers = nova_scenario._boot_servers(image_id, flavor_id,
                                                  requests=servers_per_tenant,
                                                  auto_assign_nic=auto_nic,
                                                  **kwargs)

            current_servers = [server.id for server in servers]

            LOG.debug("Adding booted servers %s to context" % current_servers)

            self.context["tenants"][tenant_id][
                "servers"] = current_servers

    def cleanup(self):
        """Delete all servers created by this context."""
        resource_manager.cleanup(names=["nova.servers"],
                                 users=self.context.get("users", []),
                                 superclass=nova_utils.NovaScenario,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,685
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/key_manager/barbican.py
|
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import atomic
from rally.task import service
class BarbicanService(service.Service):
    """Thin wrapper around the barbican client with atomic-action timing."""

    @atomic.action_timer("barbican.list_secrets")
    def list_secrets(self):
        """List Secret"""
        return self._clients.barbican().secrets.list()

    @atomic.action_timer("barbican.create_secret")
    def create_secret(self, name=None, payload=None,
                      payload_content_type=None, payload_content_encoding=None,
                      algorithm=None, bit_length=None, secret_type=None,
                      mode=None, expiration=None):
        """Create Secret

        :param name: A friendly name for the secret
        :param payload: The unencrypted secret data
        :param payload_content_type: the format/type of the secret data
        :param payload_content_encoding: the encoding of the secret data
        :param algorithm: the algorithm associated with this secret key
        :param bit_length: The bit length of this secret key
        :param mode: the algorithm mode used with this secret key
        :param secret_type: The secret type for this secret key
        :param expiration: the expiration time of the secret in ISO8601
            format
        :returns: a new secret object
        """
        name = name or self.generate_random_name()
        val = self._clients.barbican().secrets.create(
            name=name, payload=payload,
            payload_content_type=payload_content_type,
            payload_content_encoding=payload_content_encoding,
            algorithm=algorithm, bit_length=bit_length, mode=mode,
            secret_type=secret_type, expiration=expiration)
        val.store()
        return val

    @atomic.action_timer("barbican.get_secret")
    def get_secret(self, secret_ref):
        """Get the secret.

        :param secret_ref: The reference of the secret.
        :raises exceptions.GetResourceFailure: when the lazily-loaded secret
            cannot be resolved on the server
        """
        secret = self._clients.barbican().secrets.get(secret_ref)
        # secret is lazy, its properties would be filled with real
        # values while getting some property.
        try:
            secret.status
        except Exception as e:
            # local import keeps the module importable without rally
            # installed at import time — TODO confirm this is the intent
            from rally import exceptions
            raise exceptions.GetResourceFailure(resource=secret, err=e)
        return secret

    @atomic.action_timer("barbican.delete_secret")
    def delete_secret(self, secret_name):
        """Delete the secret

        :param secret_name: The name of the secret to delete
        """
        return self._clients.barbican().secrets.delete(secret_name)

    @atomic.action_timer("barbican.list_container")
    def list_container(self):
        """List containers"""
        return self._clients.barbican().containers.list()

    @atomic.action_timer("barbican.container_delete")
    def container_delete(self, container_href):
        """Delete the container

        :param container_href: the container reference
        """
        return self._clients.barbican().containers.delete(container_href)

    @atomic.action_timer("barbican.container_create")
    def container_create(self, name=None, secrets=None):
        """Create a generic container

        :param name: the name of the container
        :param secrets: secrets to populate when creating a container
        """
        name = name or self.generate_random_name()
        val = self._clients.barbican().containers.create(
            name=name, secrets=secrets)
        val.store()
        return val

    @atomic.action_timer("barbican.create_rsa_container")
    def create_rsa_container(self, name=None, public_key=None,
                             private_key=None, private_key_passphrase=None):
        """Create a RSA container

        :param name: a friendly name for the container
        :param public_key: Secret object containing a Public Key
        :param private_key: Secret object containing a Private Key
        :param private_key_passphrase: Secret object containing
            a passphrase
        :returns: RSAContainer
        """
        name = name or self.generate_random_name()
        val = self._clients.barbican().containers.create_rsa(
            name=name, public_key=public_key, private_key=private_key,
            private_key_passphrase=private_key_passphrase)
        val.store()
        return val

    @atomic.action_timer("barbican.create_certificate_container")
    def create_certificate_container(self, name=None, certificate=None,
                                     intermediates=None, private_key=None,
                                     private_key_passphrase=None):
        """Create a certificate container

        :param name: A friendly name for the CertificateContainer
        :param certificate: Secret object containing a Certificate
        :param intermediates: Secret object containing
            Intermediate Certs
        :param private_key: Secret object containing a Private Key
        :param private_key_passphrase: Secret object containing a passphrase
        :returns: CertificateContainer
        """
        name = name or self.generate_random_name()
        # BUG FIX: the passphrase argument was previously dropped by passing
        # a literal None to the client; forward the caller's value instead.
        val = self._clients.barbican().containers.create_certificate(
            name=name, certificate=certificate, intermediates=intermediates,
            private_key=private_key,
            private_key_passphrase=private_key_passphrase)
        val.store()
        return val

    @atomic.action_timer("barbican.orders_list")
    def orders_list(self):
        """list orders"""
        return self._clients.barbican().orders.list()

    @atomic.action_timer("barbican.orders_delete")
    def orders_delete(self, order_ref):
        """Delete the order

        :param order_ref: The order reference
        """
        return self._clients.barbican().orders.delete(order_ref)

    @atomic.action_timer("barbican.orders_get")
    def orders_get(self, order_ref):
        """Get the order

        :param order_ref: The order reference
        """
        return self._clients.barbican().orders.get(order_ref)

    @atomic.action_timer("barbican.create_key")
    def create_key(self, name=None, algorithm="aes", bit_length=256, mode=None,
                   payload_content_type=None, expiration=None):
        """Create a key order object

        :param name: A friendly name for the secret to be created
        :param algorithm: The algorithm associated with this secret key
        :param bit_length: The bit length of this secret key
        :param mode: The algorithm mode used with this secret key
        :param payload_content_type: The format/type of the secret data
        :param expiration: The expiration time of the secret
            in ISO 8601 format
        :returns: KeyOrder
        """
        name = name or self.generate_random_name()
        order = self._clients.barbican().orders.create_key(
            name=name, algorithm=algorithm, bit_length=bit_length,
            mode=mode, payload_content_type=payload_content_type,
            expiration=expiration)
        order.submit()
        return order

    @atomic.action_timer("barbican.create_asymmetric")
    def create_asymmetric(self, name=None, algorithm="aes", bit_length=256,
                          pass_phrase=None, payload_content_type=None,
                          expiration=None):
        """Create an asymmetric order object

        :param name: A friendly name for the container to be created
        :param algorithm: The algorithm associated with this secret key
        :param bit_length: The bit length of this secret key
        :param pass_phrase: Optional passphrase
        :param payload_content_type: The format/type of the secret data
        :param expiration: The expiration time of the secret
            in ISO 8601 format
        :returns: AsymmetricOrder
        """
        name = name or self.generate_random_name()
        order = self._clients.barbican().orders.create_asymmetric(
            name=name, algorithm=algorithm, bit_length=bit_length,
            pass_phrase=pass_phrase, payload_content_type=payload_content_type,
            expiration=expiration)
        order.submit()
        return order

    @atomic.action_timer("barbican.create_certificate")
    def create_certificate(self, name=None, request_type=None, subject_dn=None,
                           source_container_ref=None, ca_id=None, profile=None,
                           request_data=None):
        """Create a certificate order object

        :param name: A friendly name for the container to be created
        :param request_type: The type of the certificate request
        :param subject_dn: A subject for the certificate
        :param source_container_ref: A container with a
            public/private key pair to use as source for stored-key
            requests
        :param ca_id: The identifier of the CA to use
        :param profile: The profile of certificate to use
        :param request_data: The CSR content
        :returns: CertificateOrder
        """
        name = name or self.generate_random_name()
        order = self._clients.barbican().orders.create_certificate(
            name=name, request_type=request_type, subject_dn=subject_dn,
            source_container_ref=source_container_ref, ca_id=ca_id,
            profile=profile, request_data=request_data)
        order.submit()
        return order
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,686
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/api_versions.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task import context
@validation.configure("check_api_versions")
class CheckOpenStackAPIVersionsValidator(validation.Validator):
    """Additional validation for api_versions context"""

    def validate(self, context, config, plugin_cls, plugin_cfg):
        """Check each configured client's version/service settings.

        Fails on the first client whose plugin rejects the supplied
        version or does not support configurable service types.
        """
        for client_name, client_conf in plugin_cfg.items():
            client_cls = osclients.OSClient.get(client_name)
            try:
                if ("service_type" in client_conf
                        or "service_name" in client_conf):
                    # Raises when the client plugin has a fixed service type.
                    client_cls.is_service_type_configurable()
                if "version" in client_conf:
                    client_cls.validate_version(client_conf["version"])
            except exceptions.RallyException as e:
                return self.fail(
                    "Invalid settings for '%(client)s': %(error)s" % {
                        "client": client_name,
                        "error": e.format_message()})
@validation.add("check_api_versions")
@context.configure(name="api_versions", platform="openstack", order=150)
class OpenStackAPIVersions(context.OpenStackContext):
    """Context for specifying OpenStack clients versions and service types.

    Some OpenStack services support several API versions. To recognize
    the endpoints of each version, separate service types are provided in
    Keystone service catalog.

    Rally has the map of default service names - service types. But since
    service type is an entity, which can be configured manually by admin(
    via keystone api) without relation to service name, such map can be
    insufficient.

    Also, Keystone service catalog does not provide a map types to name
    (this statement is true for keystone < 3.3 ).

    This context was designed for not-default service types and not-default
    API versions usage.

    An example of specifying API version:

    .. code-block:: json

        # In this example we will launch NovaKeypair.create_and_list_keypairs
        # scenario on 2.2 api version.
        {
            "NovaKeypair.create_and_list_keypairs": [
                {
                    "args": {
                        "key_type": "x509"
                    },
                    "runner": {
                        "type": "constant",
                        "times": 10,
                        "concurrency": 2
                    },
                    "context": {
                        "users": {
                            "tenants": 3,
                            "users_per_tenant": 2
                        },
                        "api_versions": {
                            "nova": {
                                "version": 2.2
                            }
                        }
                    }
                }
            ]
        }

    An example of specifying API version along with service type:

    .. code-block:: json

        # In this example we will launch CinderVolumes.create_and_attach_volume
        # scenario on Cinder V2
        {
            "CinderVolumes.create_and_attach_volume": [
                {
                    "args": {
                        "size": 10,
                        "image": {
                            "name": "^cirros.*-disk$"
                        },
                        "flavor": {
                            "name": "m1.tiny"
                        },
                        "create_volume_params": {
                            "availability_zone": "nova"
                        }
                    },
                    "runner": {
                        "type": "constant",
                        "times": 5,
                        "concurrency": 1
                    },
                    "context": {
                        "users": {
                            "tenants": 2,
                            "users_per_tenant": 2
                        },
                        "api_versions": {
                            "cinder": {
                                "version": 2,
                                "service_type": "volumev2"
                            }
                        }
                    }
                }
            ]
        }

    Also, it possible to use service name as an identifier of service endpoint,
    but an admin user is required (Keystone can return map of service
    names - types, but such API is permitted only for admin). An example:

    .. code-block:: json

        # Similar to the previous example, but `service_name` argument is used
        # instead of `service_type`
        {
            "CinderVolumes.create_and_attach_volume": [
                {
                    "args": {
                        "size": 10,
                        "image": {
                            "name": "^cirros.*-disk$"
                        },
                        "flavor": {
                            "name": "m1.tiny"
                        },
                        "create_volume_params": {
                            "availability_zone": "nova"
                        }
                    },
                    "runner": {
                        "type": "constant",
                        "times": 5,
                        "concurrency": 1
                    },
                    "context": {
                        "users": {
                            "tenants": 2,
                            "users_per_tenant": 2
                        },
                        "api_versions": {
                            "cinder": {
                                "version": 2,
                                "service_name": "cinderv2"
                            }
                        }
                    }
                }
            ]
        }
    """

    # A version may be given either as a string ("2.2") or a number (2.2).
    VERSION_SCHEMA = {
        "anyOf": [
            {"type": "string", "description": "a string-like version."},
            {"type": "number", "description": "a number-like version."}
        ]
    }

    # Per-client config: exactly one of the three shapes below
    # (version only / service_name [+version] / service_type [+version]).
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "patternProperties": {
            "^[a-z]+$": {
                "type": "object",
                "oneOf": [
                    {
                        "description": "version only",
                        "properties": {
                            "version": VERSION_SCHEMA,
                        },
                        "required": ["version"],
                        "additionalProperties": False
                    },
                    {
                        "description": "version and service_name",
                        "properties": {
                            "version": VERSION_SCHEMA,
                            "service_name": {"type": "string"}
                        },
                        "required": ["service_name"],
                        "additionalProperties": False
                    },
                    {
                        "description": "version and service_type",
                        "properties": {
                            "version": VERSION_SCHEMA,
                            "service_type": {"type": "string"}
                        },
                        "required": ["service_type"],
                        "additionalProperties": False
                    }
                ],
            }
        },
        "minProperties": 1,
        "additionalProperties": False
    }

    def setup(self):
        """Validate the configured services and propagate api_info.

        Resolves ``service_name`` entries into service types (admin-only),
        then copies the whole api_versions config into the ``api_info`` of
        the admin credential (if any) and of every user credential.
        """
        # FIXME(andreykurilin): move all checks to validate method.

        # use admin only when `service_name` is presented
        admin_clients = osclients.Clients(
            self.context.get("admin", {}).get("credential"))
        # NOTE(review): a random user's catalog is used for the
        # service_type check — presumably all users see the same catalog.
        clients = osclients.Clients(random.choice(
            self.context["users"])["credential"])
        services = clients.keystone.service_catalog.get_endpoints()

        # Lazily-built name->type map; fetched from Keystone at most once.
        services_from_admin = None
        for client_name, conf in self.config.items():
            if "service_type" in conf and conf["service_type"] not in services:
                raise exceptions.ValidationError(
                    "There is no service with '%s' type in your environment."
                    % conf["service_type"])
            elif "service_name" in conf:
                if not self.context.get("admin", {}).get("credential"):
                    raise exceptions.ContextSetupFailure(
                        ctx_name=self.get_name(),
                        msg="Setting 'service_name' is admin only operation.")
                if not services_from_admin:
                    services_from_admin = dict(
                        [(s.name, s.type)
                         for s in admin_clients.keystone().services.list()])
                if conf["service_name"] not in services_from_admin:
                    raise exceptions.ValidationError(
                        "There is no '%s' service in your environment"
                        % conf["service_name"])
                # TODO(boris-42): Use separate key ["openstack"]["versions"]
                # Rewrite the shared config in place so later readers see
                # the resolved service_type instead of the service_name.
                self.context["config"]["api_versions@openstack"][client_name][
                    "service_type"] = services_from_admin[conf["service_name"]]

        # Propagate the (possibly rewritten) config to every credential.
        admin_cred = self.context.get("admin", {}).get("credential")
        if admin_cred:
            admin_cred["api_info"].update(
                self.context["config"]["api_versions@openstack"]
            )
        for user in self.context["users"]:
            user["credential"]["api_info"].update(
                self.context["config"]["api_versions@openstack"]
            )

    def cleanup(self):
        # nothing to do here
        pass
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,687
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/functional/extra/fake_dir/fake_plugin.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common import osclients
from rally_openstack.task import scenario
@osclients.configure("fakedummy", default_version="1",
                     default_service_type="dummy",
                     supported_versions=["1", "2"])
class FakeDummy(osclients.OSClient):
    """Fake OSClient plugin that simply echoes its resolved settings."""

    def create_client(self, version=None, service_type=None):
        # Resolve defaults through the OSClient helpers, then return them
        # so tests can inspect exactly what was chosen.
        resolved = {
            "version": self.choose_version(version),
            "service_type": self.choose_service_type(service_type),
        }
        return resolved
@osclients.configure("faileddummy", default_version="1",
                     default_service_type="faileddummy",
                     supported_versions=["1", "2"])
class FailedDummy(osclients.OSClient):
    """Fake OSClient plugin whose client creation always fails."""

    def create_client(self, version=None, service_type=None):
        # Unconditionally blow up so callers can exercise error handling.
        failure = Exception("Failed Dummy")
        raise failure
@scenario.configure(name="FakeDummy.openstack_api")
class FakeDummyOpenstackAPI(scenario.OpenStackScenario):
    """Scenario asserting FakeDummy resolves to the v2 dummy settings."""

    def run(self):
        # Admin client is checked first, then the regular user client;
        # both must resolve to service type "dummyv2" and version "2".
        for factory in (self.admin_clients, self.clients):
            dummy = factory("fakedummy")
            self.assertEqual("dummyv2", dummy["service_type"])
            self.assertEqual("2", dummy["version"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,688
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/__init__.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from rally.common import version as __rally_version__
from rally_openstack import _compat
# NOTE(review): the imported `version` module (bound as __rally_version__)
# is immediately replaced by derived values; the import name is only a
# staging variable for the tuple below.
__rally_version__ = __rally_version__.version_info.semantic_version()
__rally_version__ = __rally_version__.version_tuple()

# Package version metadata derived via pbr from the installed dist/git tree.
__version_info__ = pbr.version.VersionInfo("rally-openstack")
__version__ = __version_info__.version_string()
__version_tuple__ = __version_info__.semantic_version().version_tuple()

# WARNING: IF YOU ARE LOOKING FOR SOME PHYSICALLY UNEXISTING MODULES THAT CAN
# BE IMPORTED (FOR BACKWARD COMPATIBILITY), PLEASE CHECK THE NEXT FUNCTION
# HAPPY DEBUGGING!!
_compat.init()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,689
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/swift/utils.py
|
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import atomic
from rally_openstack.task import scenario
class SwiftScenario(scenario.OpenStackScenario):
    """Base class for Swift scenarios with basic atomic actions."""

    @atomic.action_timer("swift.list_containers")
    def _list_containers(self, full_listing=True, **kwargs):
        """Return list of containers.

        :param full_listing: bool, enable unlimit number of listing returned
        :param kwargs: dict, other optional parameters to get_account
        :returns: tuple, (dict of response headers, a list of containers)
        """
        swift = self.clients("swift")
        return swift.get_account(full_listing=full_listing, **kwargs)

    @atomic.action_timer("swift.create_container")
    def _create_container(self, public=False, **kwargs):
        """Create a new container.

        :param public: bool, set container as public
        :param kwargs: dict, other optional parameters to put_container
        :returns: container name
        """
        if public:
            # Add the world-readable ACL without clobbering caller headers.
            headers = kwargs.setdefault("headers", {})
            headers.setdefault("X-Container-Read", ".r:*,.rlistings")
        name = self.generate_random_name()
        self.clients("swift").put_container(name, **kwargs)
        return name

    @atomic.action_timer("swift.delete_container")
    def _delete_container(self, container_name, **kwargs):
        """Delete a container with given name.

        :param container_name: str, name of the container to delete
        :param kwargs: dict, other optional parameters to delete_container
        """
        swift = self.clients("swift")
        swift.delete_container(container_name, **kwargs)

    @atomic.action_timer("swift.list_objects")
    def _list_objects(self, container_name, full_listing=True, **kwargs):
        """Return objects inside container.

        :param container_name: str, name of the container to make the list
                               objects operation against
        :param full_listing: bool, enable unlimit number of listing returned
        :param kwargs: dict, other optional parameters to get_container
        :returns: tuple, (dict of response headers, a list of objects)
        """
        swift = self.clients("swift")
        return swift.get_container(
            container_name, full_listing=full_listing, **kwargs)

    @atomic.action_timer("swift.upload_object")
    def _upload_object(self, container_name, content, **kwargs):
        """Upload content to a given container.

        :param container_name: str, name of the container to upload object to
        :param content: file stream, content to upload
        :param kwargs: dict, other optional parameters to put_object
        :returns: tuple, (etag and object name)
        """
        obj_name = self.generate_random_name()
        etag = self.clients("swift").put_object(
            container_name, obj_name, content, **kwargs)
        return etag, obj_name

    @atomic.action_timer("swift.download_object")
    def _download_object(self, container_name, object_name, **kwargs):
        """Download object from container.

        :param container_name: str, name of the container to download object
                               from
        :param object_name: str, name of the object to download
        :param kwargs: dict, other optional parameters to get_object
        :returns: tuple, (dict of response headers, the object's contents)
        """
        swift = self.clients("swift")
        return swift.get_object(container_name, object_name, **kwargs)

    @atomic.action_timer("swift.delete_object")
    def _delete_object(self, container_name, object_name, **kwargs):
        """Delete object from container.

        :param container_name: str, name of the container to delete object
                               from
        :param object_name: str, name of the object to delete
        :param kwargs: dict, other optional parameters to delete_object
        """
        swift = self.clients("swift")
        swift.delete_object(container_name, object_name, **kwargs)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,690
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/neutron/test_utils.py
|
# Copyright 2013: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import netaddr
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import credential
from rally_openstack.task.scenarios.neutron import utils
from tests.unit import test
# Dotted module paths used as mock.patch targets throughout this test module.
NETWORK_SERVICE = "rally_openstack.common.services.network"
NET_UTILS = "%s.net_utils" % NETWORK_SERVICE
NEUTRON_UTILS = "rally_openstack.task.scenarios.neutron.utils"
@ddt.ddt
class NeutronScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(NeutronScenarioTestCase, self).setUp()
self.network = mock.Mock()
self._clients = mock.MagicMock(
credential=credential.OpenStackCredential(
auth_url="example.com",
username="root",
password="changeme",
permission=consts.EndpointPermission.ADMIN
)
)
self._nc = self._clients.neutron.return_value
self.scenario = utils.NeutronScenario(self.context,
clients=self._clients)
self.random_name = "random_name"
name_generator = mock.Mock(return_value=self.random_name)
self.scenario.generate_random_name = name_generator
self.scenario.neutron._name_generator = name_generator
def test__get_network_id(self):
networks = [{"id": "foo-id", "name": "foo-network"},
{"id": "bar-id", "name": "bar-network"}]
network_id = "foo-id"
# Valid network-name
network = "foo-network"
self._nc.list_networks = mock.Mock(return_value={"networks": networks})
resultant_network_id = self.scenario._get_network_id(network)
self.assertEqual(network_id, resultant_network_id)
self._nc.list_networks.assert_called_once_with()
self._nc.list_networks.reset_mock()
# Valid network-id
network = "foo-id"
resultant_network_id = self.scenario._get_network_id(network)
self.assertEqual(network_id, resultant_network_id)
self._nc.list_networks.assert_called_once_with()
self._nc.list_networks.reset_mock()
# Invalid network-name
network = "absent-network"
self.assertRaises(exceptions.NotFoundException,
self.scenario._get_network_id, network)
self._nc.list_networks.assert_called_once_with()
def test_create_network(self):
network = {"network": mock.Mock()}
self._nc.create_network.return_value = network
network_data = {"admin_state_up": False}
self.assertEqual(network, self.scenario._create_network(network_data))
expected_network_data = {"network": network_data}
network_data["name"] = self.scenario.generate_random_name.return_value
self._nc.create_network.assert_called_once_with(expected_network_data)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_network")
def test_list_networks(self):
networks_list = []
networks_dict = {"networks": networks_list}
self._nc.list_networks.return_value = networks_dict
return_networks_list = self.scenario._list_networks()
self.assertEqual(networks_list, return_networks_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_networks", count=1)
def test_show_network(self):
network = {
"network": {
"id": "fake-id",
"name": "fake-name",
"admin_state_up": False
}
}
return_network = self.scenario._show_network(network)
self.assertEqual(
{"network": self._nc.show_network.return_value["network"]},
return_network)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_network")
def test_show_router(self):
router = {
"router": {
"id": "fake-id",
"name": "fake-name",
"admin_state_up": False
}
}
return_router = self.scenario._show_router(router)
self.assertEqual(
{"router": self._nc.show_router.return_value["router"]},
return_router)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_router")
def test_update_network(self):
expected_network = {
"network": {
"name": self.scenario.generate_random_name.return_value,
"admin_state_up": False
}
}
self._nc.update_network.return_value = expected_network
network = {"network": {"name": "network-name", "id": "network-id"}}
network_update_args = {"name": "foo", "admin_state_up": False}
result_network = self.scenario._update_network(network,
network_update_args)
self._nc.update_network.assert_called_once_with(
network["network"]["id"], expected_network)
self.assertEqual(expected_network, result_network)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_network")
def test_delete_network(self):
net_id = "foo"
network = {"id": net_id}
self.scenario._delete_network(network)
self._nc.delete_network.assert_called_once_with(net_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_network")
@mock.patch("%s.generate_cidr" % NET_UTILS)
def test_create_subnet(self, mock_generate_cidr):
network_id = "fake-id"
start_cidr = "192.168.0.0/24"
mock_generate_cidr.return_value = (4, "192.168.0.0/24")
network = {"network": {"id": network_id}}
expected_subnet_data = {
"subnet": {
"network_id": network_id,
"cidr": start_cidr,
"ip_version": netaddr.IPNetwork(start_cidr).version,
"name": self.scenario.generate_random_name.return_value,
"dns_nameservers": mock.ANY
}
}
# Default options
subnet_data = {"network_id": network_id}
self.scenario._create_subnet(network, subnet_data, start_cidr)
self._nc.create_subnet.assert_called_once_with(
expected_subnet_data)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_subnet")
self._nc.create_subnet.reset_mock()
# Custom options
extras = {"cidr": "2001::/64", "allocation_pools": []}
extras["ip_version"] = netaddr.IPNetwork(extras["cidr"]).version
mock_generate_cidr.return_value = (6, "2001::/64")
subnet_data.update(extras)
expected_subnet_data["subnet"].update(extras)
self.scenario._create_subnet(network, subnet_data)
self._nc.create_subnet.assert_called_once_with(expected_subnet_data)
def test_list_subnets(self):
subnets = [{"name": "fake1"}, {"name": "fake2"}]
self._nc.list_subnets.return_value = {"subnets": subnets}
result = self.scenario._list_subnets()
self.assertEqual(subnets, result)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_subnets")
def test_show_subnet(self):
subnet = {"subnet": {"name": "fake-name", "id": "fake-id"}}
result_subnet = self.scenario._show_subnet(subnet)
self.assertEqual(
{"subnet": self._nc.show_subnet.return_value["subnet"]},
result_subnet)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_subnet")
def test_update_subnet(self):
expected_subnet = {
"subnet": {
"name": self.scenario.generate_random_name.return_value,
"enable_dhcp": False
}
}
self._nc.update_subnet.return_value = expected_subnet
subnet = {"subnet": {"name": "subnet-name", "id": "subnet-id"}}
subnet_update_args = {"name": "foo", "enable_dhcp": False}
result_subnet = self.scenario._update_subnet(subnet,
subnet_update_args)
self._nc.update_subnet.assert_called_once_with(
subnet["subnet"]["id"], expected_subnet)
self.assertEqual(expected_subnet, result_subnet)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_subnet")
def test_delete_subnet(self):
network = self.scenario._create_network({})
subnet = self.scenario._create_subnet(network, {})
self.scenario._delete_subnet(subnet)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_subnet")
def test_create_router(self):
router = self._nc.create_router.return_value
# Default options
result_router = self.scenario._create_router({})
self._nc.create_router.assert_called_once_with({
"router": {
"name": self.scenario.generate_random_name.return_value
}
})
self.assertEqual({"router": router["router"]}, result_router)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_router")
def test_create_router_with_ext_gw(self):
self._clients.credential.permission = consts.EndpointPermission.ADMIN
net_id = "ext-net"
self._nc.list_networks.return_value = {
"networks": [{"id": net_id, "router:external": True}]
}
self._nc.list_extensions.return_value = {
"extensions": [{"alias": "ext-gw-mode"}]}
# External_gw options
gw_info = {"network_id": net_id, "enable_snat": True}
router_data = {
"name": self.scenario.generate_random_name.return_value,
"external_gateway_info": gw_info
}
result_router = self.scenario._create_router({}, external_gw=True)
self._nc.create_router.assert_called_once_with(
{"router": router_data})
self.assertEqual(
{"router": self._nc.create_router.return_value["router"]},
result_router
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "neutron.create_router")
def test_create_router_with_ext_gw_but_no_ext_net(self):
self._nc.list_networks.return_value = {"networks": []}
self._nc.list_extensions.return_value = {
"extensions": [{"alias": "ext-gw-mode"}]
}
# External_gw options with no external networks in list_networks()
result_router = self.scenario._create_router({}, external_gw=True)
self._nc.create_router.assert_called_once_with({
"router": {"name": self.scenario.generate_random_name.return_value}
})
self.assertEqual(
{"router": self._nc.create_router.return_value["router"]},
result_router
)
self._nc.list_networks.assert_called_once_with(
**{"router:external": True}
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_router")
def test_create_router_with_ext_gw_but_no_ext_gw_mode_extension(self):
net_id = "ext-net"
self._nc.list_networks.return_value = {
"networks": [{"id": net_id, "router:external": True}]
}
self._nc.list_extensions.return_value = {"extensions": []}
result_router = self.scenario._create_router({}, external_gw=True)
router_data = {
"name": self.scenario.generate_random_name.return_value,
"external_gateway_info": {"network_id": net_id}
}
self._nc.create_router.assert_called_once_with({"router": router_data})
self.assertEqual(
{"router": self._nc.create_router.return_value["router"]},
result_router
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "neutron.create_router")
def test_list_routers(self):
routers = [mock.Mock()]
self._nc.list_routers.return_value = {"routers": routers}
self.assertEqual(routers, self.scenario._list_routers())
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_routers")
def test_list_agents(self):
agents = [mock.Mock()]
self._nc.list_agents.return_value = {"agents": agents}
self.assertEqual(agents, self.scenario._list_agents())
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_agents")
def test_update_router(self):
expected_router = {
"router": {
"name": self.scenario.generate_random_name.return_value,
"admin_state_up": False
}
}
self._nc.update_router.return_value = expected_router
router = {
"router": {
"id": "router-id",
"name": "router-name",
"admin_state_up": True
}
}
router_update_args = {"name": "foo", "admin_state_up": False}
result_router = self.scenario._update_router(router,
router_update_args)
self._nc.update_router.assert_called_once_with(
router["router"]["id"], expected_router)
self.assertEqual(expected_router, result_router)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_router")
def test_delete_router(self):
router_id = "foo"
router = {"router": {"id": router_id}}
self.scenario._delete_router(router)
self._nc.delete_router.assert_called_once_with(router_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_router")
def test_remove_interface_router(self):
subnet = {"name": "subnet-name", "id": "subnet-id"}
router = {"id": 1}
self.scenario._add_interface_router(subnet, router)
self.scenario._remove_interface_router(subnet, router)
self._nc.remove_interface_router.assert_called_once_with(
router["id"], {"subnet_id": subnet["id"]})
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.remove_interface_router")
def test_add_gateway_router(self):
ext_net = {
"network": {
"name": "extnet-name",
"id": "extnet-id"
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
enable_snat = "fake_snat"
self._nc.list_extensions.return_value = {
"extensions": [{"alias": "ext-gw-mode"}]}
self.scenario._add_gateway_router(router, ext_net, enable_snat)
self._nc.add_gateway_router.assert_called_once_with(
router["router"]["id"],
{"network_id": ext_net["network"]["id"],
"enable_snat": enable_snat})
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.add_gateway_router")
def test_add_gateway_router_no_snat_update(self):
ext_net = {
"network": {
"name": "extnet-name",
"id": "extnet-id"
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
self._nc.list_extensions.return_value = {
"extensions": [{"alias": "ext-gw-mode"}]}
self.scenario._add_gateway_router(router, ext_net)
self._nc.add_gateway_router.assert_called_once_with(
router["router"]["id"],
{"network_id": ext_net["network"]["id"]}
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.add_gateway_router")
def test_add_gateway_router_without_ext_gw_mode_extension(self):
ext_net = {
"network": {
"name": "extnet-name",
"id": "extnet-id"
}
}
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
self._nc.list_extensions.return_value = {
"extensions": {}}
self.scenario._add_gateway_router(router, ext_net, enable_snat=True)
self._nc.add_gateway_router.assert_called_once_with(
router["router"]["id"], {"network_id": ext_net["network"]["id"]})
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.add_gateway_router")
def test_remove_gateway_router(self):
router = {
"router": {
"name": "router-name",
"id": "router-id"
}
}
self.scenario._remove_gateway_router(router)
self._nc.remove_gateway_router.assert_called_once_with(
router["router"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.remove_gateway_router")
def test_create_port(self):
net_id = "network-id"
net = {"network": {"id": net_id}}
expected_port_args = {
"port": {
"network_id": net_id,
"name": self.scenario.generate_random_name.return_value
}
}
# Defaults
port_create_args = {}
self.scenario._create_port(net, port_create_args)
self._nc.create_port.assert_called_once_with(expected_port_args)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_port")
self._nc.create_port.reset_mock()
# Custom options
port_args = {"admin_state_up": True}
expected_port_args["port"].update(port_args)
self.scenario._create_port(net, port_args)
self._nc.create_port.assert_called_once_with(expected_port_args)
def test_list_ports(self):
ports = [{"name": "port1"}, {"name": "port2"}]
self._nc.list_ports.return_value = {"ports": ports}
self.assertEqual(ports, self.scenario._list_ports())
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_ports")
def test_show_port(self):
expect_port = {
"port": {
"id": "port-id",
"name": "port-name",
"admin_state_up": True
}
}
self._nc.show_port.return_value = expect_port
self.assertEqual(expect_port, self.scenario._show_port(expect_port))
self._nc.show_port.assert_called_once_with(
expect_port["port"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_port")
def test_update_port(self):
expected_port = {
"port": {
"admin_state_up": False,
"name": self.scenario.generate_random_name.return_value
}
}
self._nc.update_port.return_value = expected_port
port = {
"port": {
"id": "port-id",
"name": "port-name",
"admin_state_up": True
}
}
port_update_args = {"admin_state_up": False}
result_port = self.scenario._update_port(port, port_update_args)
self._nc.update_port.assert_called_once_with(
port["port"]["id"], expected_port)
self.assertEqual(expected_port, result_port)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_port")
def test_delete_port(self):
network = self.scenario._create_network({})
port = self.scenario._create_port(network, {})
self.scenario._delete_port(port)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_port")
    @ddt.data(
        {"context": {"tenant": {"networks":
                                [mock.MagicMock(), mock.MagicMock()]}}},
        {"network_create_args": {"fakearg": "fake"},
         "context": {"tenant": {"networks":
                                [mock.MagicMock(), mock.MagicMock()]}}})
    @ddt.unpack
    @mock.patch("random.choice", side_effect=lambda l: l[0])
    def test_get_or_create_network(self, mock_random_choice,
                                   network_create_args=None, context=None):
        """_get_or_create_network prefers an existing tenant network and
        only creates a new one when the context provides none.
        """
        self.scenario.context = context
        self.scenario._create_network = mock.Mock(
            return_value={"network": mock.Mock()})
        network = self.scenario._get_or_create_network(network_create_args)
        # ensure that the return value is the proper type either way
        self.assertIn("network", network)
        if "networks" in context["tenant"]:
            # random.choice is patched to pick the first tenant network
            self.assertEqual(network,
                             {"network": context["tenant"]["networks"][0]})
            self.assertFalse(self.scenario._create_network.called)
        else:
            self.assertEqual(network,
                             self.scenario._create_network.return_value)
            self.scenario._create_network.assert_called_once_with(
                network_create_args or {})
    def test_create_network_and_subnets(self):
        """One network plus ``subnets_per_network`` subnets are created;
        custom subnet args are merged into every subnet request.
        """
        self._nc.create_network.return_value = {"network": {"id": "fake-id"}}
        self._nc.create_subnet.return_value = {
            "subnet": {
                "name": "subnet-name",
                "id": "subnet-id",
                "enable_dhcp": False
            }
        }
        network_create_args = {}
        subnet_create_args = {}
        subnets_per_network = 4
        # Default options
        self.scenario._create_network_and_subnets(
            network_create_args=network_create_args,
            subnet_create_args=subnet_create_args,
            subnets_per_network=subnets_per_network)
        self._nc.create_network.assert_called_once_with(
            {"network": {"name": self.random_name}}
        )
        # every subnet request carries the defaults; cidr/dns are generated
        self.assertEqual(
            [
                mock.call(
                    {"subnet": {"name": self.random_name,
                                "network_id": "fake-id",
                                "dns_nameservers": mock.ANY,
                                "ip_version": 4, "cidr": mock.ANY}
                     }
                )
            ] * subnets_per_network,
            self._nc.create_subnet.call_args_list
        )
        self._nc.create_network.reset_mock()
        self._nc.create_subnet.reset_mock()
        # Custom options
        self.scenario._create_network_and_subnets(
            network_create_args=network_create_args,
            subnet_create_args={"allocation_pools": ["x"]},
            subnet_cidr_start="10.10.10.0/24",
            subnets_per_network=subnets_per_network)
        self._nc.create_network.assert_called_once_with(
            {"network": {"name": self.random_name}}
        )
        self.assertEqual(
            [
                mock.call(
                    {"subnet": {"name": self.random_name,
                                "network_id": "fake-id",
                                "allocation_pools": ["x"],
                                "dns_nameservers": mock.ANY,
                                "ip_version": 4, "cidr": mock.ANY}
                     }
                )
            ] * subnets_per_network,
            self._nc.create_subnet.call_args_list
        )
def test_list_floating_ips(self):
fips_list = [{"id": "floating-ip-id"}]
fips_dict = {"floatingips": fips_list}
self._nc.list_floatingips.return_value = fips_dict
self.assertEqual(self.scenario._list_floating_ips(),
self._nc.list_floatingips.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_floating_ips")
def test_delete_floating_ip(self):
fip = {"floatingip": {"id": "fake-id"}}
self.scenario._delete_floating_ip(fip["floatingip"])
self._nc.delete_floatingip.assert_called_once_with(
fip["floatingip"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_floating_ip")
def test_associate_floating_ip(self):
fip = {"id": "fip-id"}
port = {"id": "port-id"}
self.scenario._associate_floating_ip(fip, port)
self._nc.update_floatingip.assert_called_once_with(
"fip-id", {"floatingip": {"port_id": "port-id"}})
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.associate_floating_ip")
def test_dissociate_floating_ip(self):
fip = {"id": "fip-id"}
self.scenario._dissociate_floating_ip(fip)
self._nc.update_floatingip.assert_called_once_with(
"fip-id", {"floatingip": {"port_id": None}})
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.dissociate_floating_ip")
    @ddt.data(
        {},
        {"router_create_args": {"admin_state_up": False}},
        {"network_create_args": {"router:external": True},
         "subnet_create_args": {"allocation_pools": ["x"]},
         "subnets_per_network": 3,
         "router_create_args": {"admin_state_up": False}})
    @ddt.unpack
    def test_create_network_structure(self, network_create_args=None,
                                      subnet_create_args=None,
                                      subnet_cidr_start=None,
                                      subnets_per_network=1,
                                      router_create_args=None):
        """A network structure is one network, N subnets, N routers and
        one router interface per subnet.
        """
        network_id = "net-id"
        network = {"network": {"id": network_id}}
        router_create_args = router_create_args or {}
        # pre-build expected bodies: one subnet+router pair per iteration
        subnets = []
        subnet_create_calls = []
        routers = []
        router_create_calls = []
        for i in range(subnets_per_network):
            subnets.append({"subnet": mock.MagicMock()})
            routers.append({"router": mock.MagicMock()})
            subnet_create_calls.append(
                mock.call({
                    "subnet": {
                        "network_id": network_id,
                        "name": self.random_name,
                        "dns_nameservers": mock.ANY,
                        "ip_version": 4,
                        "cidr": mock.ANY,
                        **(subnet_create_args or {})
                    }
                }))
            router_create_calls.append(
                mock.call({
                    "router": {
                        "name": self.random_name,
                        **(router_create_args or {})
                    }
                }))
        self._nc.create_network.return_value = network
        self._nc.create_subnet.side_effect = subnets
        self._nc.create_router.side_effect = routers
        actual = self.scenario._create_network_structure(network_create_args,
                                                         subnet_create_args,
                                                         subnet_cidr_start,
                                                         subnets_per_network,
                                                         router_create_args)
        self.assertEqual((network, subnets, routers), actual)
        network_create_args = network_create_args or {}
        network_create_args["name"] = self.random_name
        self._nc.create_network.assert_called_once_with(
            {"network": network_create_args})
        self.assertEqual(
            subnet_create_calls, self._nc.create_subnet.call_args_list
        )
        self.assertEqual(
            router_create_calls, self._nc.create_router.call_args_list
        )
        # each subnet should have been plugged into its own router
        add_iface_calls = [
            mock.call(
                routers[i]["router"]["id"],
                {"subnet_id": subnets[i]["subnet"]["id"]}
            )
            for i in range(subnets_per_network or 1)]
        self.assertEqual(
            add_iface_calls,
            self._nc.add_interface_router.call_args_list
        )
def test_delete_v1_pool(self):
pool = {"pool": {"id": "fake-id"}}
self.scenario._delete_v1_pool(pool["pool"])
self.clients("neutron").delete_pool.assert_called_once_with(
pool["pool"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_pool")
def test_update_pool(self):
expected_pool = {
"pool": {
"name": self.scenario.generate_random_name.return_value,
"admin_state_up": False,
"fakearg": "fake"
}
}
self.clients("neutron").update_pool.return_value = expected_pool
pool = {"pool": {"name": "pool-name", "id": "pool-id"}}
pool_update_args = {"name": "foo",
"admin_state_up": False,
"fakearg": "fake"}
result_pool = self.scenario._update_v1_pool(pool, **pool_update_args)
self.assertEqual(expected_pool, result_pool)
self.clients("neutron").update_pool.assert_called_once_with(
pool["pool"]["id"], expected_pool)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_pool")
def test_list_v1_pools(self):
pools_list = []
pools_dict = {"pools": pools_list}
self.clients("neutron").list_pools.return_value = pools_dict
return_pools_dict = self.scenario._list_v1_pools()
self.assertEqual(pools_dict, return_pools_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_pools")
def test_list_v1_vips(self):
vips_list = []
vips_dict = {"vips": vips_list}
self.clients("neutron").list_vips.return_value = vips_dict
return_vips_dict = self.scenario._list_v1_vips()
self.assertEqual(vips_dict, return_vips_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_vips")
def test_delete_v1_vip(self):
vip = {"vip": {"id": "fake-id"}}
self.scenario._delete_v1_vip(vip["vip"])
self.clients("neutron").delete_vip.assert_called_once_with(
vip["vip"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_vip")
def test_update_v1_vip(self):
expected_vip = {
"vip": {
"name": self.scenario.generate_random_name.return_value,
"admin_state_up": False
}
}
self.clients("neutron").update_vip.return_value = expected_vip
vip = {"vip": {"name": "vip-name", "id": "vip-id"}}
vip_update_args = {"name": "foo", "admin_state_up": False}
result_vip = self.scenario._update_v1_vip(vip, **vip_update_args)
self.assertEqual(expected_vip, result_vip)
self.clients("neutron").update_vip.assert_called_once_with(
vip["vip"]["id"], expected_vip)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_vip")
def test_create_security_group(self):
security_group_create_args = {"description": "Fake security group"}
expected_security_group = {
"security_group": {
"id": "fake-id",
"name": self.scenario.generate_random_name.return_value,
"description": "Fake security group"
}
}
self._nc.create_security_group.return_value = expected_security_group
security_group_data = {
"security_group":
{"name": "random_name",
"description": "Fake security group"}
}
resultant_security_group = self.scenario._create_security_group(
**security_group_create_args)
self.assertEqual(expected_security_group, resultant_security_group)
self._nc.create_security_group.assert_called_once_with(
security_group_data)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_security_group")
def test_list_security_groups(self):
security_groups_list = [{"id": "security-group-id"}]
security_groups_dict = {"security_groups": security_groups_list}
self._nc.list_security_groups = mock.Mock(
return_value=security_groups_dict)
self.assertEqual(
self.scenario._list_security_groups(),
self._nc.list_security_groups.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_security_groups")
def test_show_security_group(self):
security_group = {"security_group": {"id": "fake-id"}}
result = self.scenario._show_security_group(security_group)
self.assertEqual(
{"security_group":
self._nc.show_security_group.return_value["security_group"]},
result
)
self._nc.show_security_group.assert_called_once_with(
security_group["security_group"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_security_group")
def test_delete_security_group(self):
security_group = {"security_group": {"id": "fake-id"}}
self.scenario._delete_security_group(security_group)
self._nc.delete_security_group.assert_called_once_with(
security_group["security_group"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_security_group")
def test_update_security_group(self):
security_group = {
"security_group": {
"id": "security-group-id",
"description": "Not updated"
}
}
expected_security_group = {
"security_group": {
"id": "security-group-id",
"name": self.scenario.generate_random_name.return_value,
"description": "Updated"
}
}
self._nc.update_security_group.return_value = expected_security_group
result_security_group = self.scenario._update_security_group(
security_group, description="Updated")
self._nc.update_security_group.assert_called_once_with(
security_group["security_group"]["id"],
{"security_group": {
"description": "Updated",
"name": self.scenario.generate_random_name.return_value}}
)
self.assertEqual(expected_security_group, result_security_group)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_security_group")
def test_create_security_group_rule(self):
security_group_rule_args = {"description": "Fake Rule"}
expected_rules = {
"security_group_rule": {
"id": "fake-id",
"security_group_id": "security-group-id",
"direction": "ingress",
"protocol": "tcp",
"description": "Fake Rule"
}
}
self._nc.create_security_group_rule.return_value = expected_rules
security_group_rule_data = {
"security_group_rule":
{"security_group_id": "security-group-id",
"direction": "ingress",
"protocol": "tcp",
"description": "Fake Rule"}
}
result_security_group_rule = self.scenario._create_security_group_rule(
"security-group-id", **security_group_rule_args)
self.assertEqual(expected_rules,
result_security_group_rule)
self._nc.create_security_group_rule.assert_called_once_with(
security_group_rule_data)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_security_group_rule")
def test_list_security_group_rules(self):
security_group_rules_list = [{"id": "security-group-rule-id"}]
security_group_rules_dict = {
"security_group_rules": security_group_rules_list}
self._nc.list_security_group_rules = mock.Mock(
return_value=security_group_rules_dict)
self.assertEqual(
self.scenario._list_security_group_rules(),
self._nc.list_security_group_rules.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_security_group_rules")
def test_show_security_group_rule(self):
return_rule = self.scenario._show_security_group_rule(1)
expected = self._nc.show_security_group_rule.return_value
expected = {"security_group_rule": expected["security_group_rule"]}
self.assertEqual(expected, return_rule)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.show_security_group_rule")
def test_delete_security_group_rule(self):
self.scenario._delete_security_group_rule(1)
self._nc.delete_security_group_rule.assert_called_once_with(1)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_security_group_rule")
    @ddt.data(
        {"networks": [{"subnets": "subnet-id"}]},
        {"pool_create_args": None, "networks": [{"subnets": ["subnet-id"]}]},
        {"pool_create_args": {}, "networks": [{"subnets": ["subnet-id"]}]},
        {"pool_create_args": {"name": "given-name"},
         "networks": [{"subnets": ["subnet-id"]}]},
    )
    @ddt.unpack
    def test__create_v1_pools(self, networks, pool_create_args=None):
        """One LBaaS v1 pool is created per subnet across all networks."""
        pool_create_args = pool_create_args or {}
        pool = {"pool": {"id": "pool-id"}}
        self.scenario._create_lb_pool = mock.Mock(return_value=pool)
        resultant_pools = self.scenario._create_v1_pools(
            networks=networks, **pool_create_args)
        if networks:
            # flatten every network's subnet list into one sequence
            subnets = []
            [subnets.extend(net["subnets"]) for net in networks]
            self.scenario._create_lb_pool.assert_has_calls(
                [mock.call(subnet,
                           **pool_create_args) for subnet in subnets])
            self.assertEqual([pool] * len(subnets), resultant_pools)
@ddt.data(
{"subnet_id": "foo-id"},
{"pool_create_args": None, "subnet_id": "foo-id"},
{"pool_create_args": {}, "subnet_id": "foo-id"},
{"pool_create_args": {"name": "given-name"},
"subnet_id": "foo-id"},
{"subnet_id": "foo-id"}
)
@ddt.unpack
def test__create_lb_pool(self, subnet_id=None,
pool_create_args=None):
pool = {"pool": {"id": "pool-id"}}
pool_create_args = pool_create_args or {}
if pool_create_args.get("name") is None:
self.generate_random_name = mock.Mock(return_value="random_name")
self.clients("neutron").create_pool.return_value = pool
args = {"lb_method": "ROUND_ROBIN", "protocol": "HTTP",
"name": "random_name", "subnet_id": subnet_id}
args.update(pool_create_args)
expected_pool_data = {"pool": args}
resultant_pool = self.scenario._create_lb_pool(
subnet_id=subnet_id,
**pool_create_args)
self.assertEqual(pool, resultant_pool)
self.clients("neutron").create_pool.assert_called_once_with(
expected_pool_data)
self._test_atomic_action_timer(
self.scenario.atomic_actions(), "neutron.create_pool")
    @ddt.data(
        {},
        {"vip_create_args": {}},
        {"vip_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test__create_v1_vip(self, vip_create_args=None):
        """_create_v1_vip merges defaults with user args into the request."""
        vip = {"vip": {"id": "vip-id"}}
        pool = {"pool": {"id": "pool-id", "subnet_id": "subnet-id"}}
        vip_create_args = vip_create_args or {}
        if vip_create_args.get("name") is None:
            # no explicit name given: the scenario must generate one
            self.scenario.generate_random_name = mock.Mock(
                return_value="random_name")
        self.clients("neutron").create_vip.return_value = vip
        args = {"protocol_port": 80, "protocol": "HTTP", "name": "random_name",
                "subnet_id": pool["pool"]["subnet_id"],
                "pool_id": pool["pool"]["id"]}
        args.update(vip_create_args)
        expected_vip_data = {"vip": args}
        resultant_vip = self.scenario._create_v1_vip(pool, **vip_create_args)
        self.assertEqual(vip, resultant_vip)
        self.clients("neutron").create_vip.assert_called_once_with(
            expected_vip_data)
    @ddt.data(
        {"floating_ip_args": {}},
        {"floating_ip_args": {"floating_ip_address": "1.0.0.1"}},
    )
    @ddt.unpack
    def test__create_floating_ip(self, floating_ip_args):
        """The floating network name is resolved to an external net id."""
        floating_network = "floating"
        fip = {"floatingip": {"id": "fip-id"}}
        network_id = "net-id"
        self._nc.create_floatingip.return_value = fip
        # two external networks: only the one matching by name is used
        self._nc.list_networks.return_value = {
            "networks": [
                {"id": "id-1", "name": "xxx",
                 "router:external": True},
                {"id": network_id, "name": floating_network,
                 "router:external": True}
            ]
        }
        expected_fip_data = {
            "floatingip": {
                "floating_network_id": network_id,
                "description": "random_name",
                **floating_ip_args
            }
        }
        resultant_fip = self.scenario._create_floatingip(
            floating_network, **floating_ip_args)
        self.assertEqual(fip, resultant_fip)
        self._nc.create_floatingip.assert_called_once_with(
            expected_fip_data)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "neutron.create_floating_ip")
    @mock.patch("%s.neutron.LOG.info" % NETWORK_SERVICE)
    def test__create_floating_ip_in_pre_newton_openstack(self, mock_log_info):
        """Pre-Newton servers reject the "description" attribute: the
        scenario logs a hint and re-raises the original BadRequest.
        """
        from neutronclient.common import exceptions as n_exceptions
        floating_network = "floating"
        fip = {"floatingip": {"id": "fip-id"}}
        network_id = "net-id"
        self._nc.create_floatingip.return_value = fip
        self._nc.list_networks.return_value = {
            "networks": [
                {"id": "id-1", "name": "xxx",
                 "router:external": True},
                {"id": network_id, "name": floating_network,
                 "router:external": True}
            ]
        }
        e = n_exceptions.BadRequest("Unrecognized attribute(s) 'description'")
        self._nc.create_floatingip.side_effect = e
        # the very same exception instance must propagate to the caller
        a_e = self.assertRaises(n_exceptions.BadRequest,
                                self.scenario._create_floatingip,
                                floating_network)
        self.assertEqual(e, a_e)
        self.assertTrue(mock_log_info.called)
        expected_fip_data = {"floatingip": {"floating_network_id": network_id,
                                            "description": "random_name"}}
        self._nc.create_floatingip.assert_called_once_with(expected_fip_data)
        self._nc.list_networks.assert_called_once_with()
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "neutron.create_floating_ip")
    @ddt.data(
        {},
        {"healthmonitor_create_args": {}},
        {"healthmonitor_create_args": {"type": "TCP"}},
    )
    @ddt.unpack
    def test__create_v1_healthmonitor(self,
                                      healthmonitor_create_args=None):
        """Monitor defaults (PING/20/10/3) are overridable via kwargs."""
        hm = {"health_monitor": {"id": "hm-id"}}
        healthmonitor_create_args = healthmonitor_create_args or {}
        self.clients("neutron").create_health_monitor.return_value = hm
        args = {"type": "PING", "delay": 20,
                "timeout": 10, "max_retries": 3}
        args.update(healthmonitor_create_args)
        expected_hm_data = {"health_monitor": args}
        resultant_hm = self.scenario._create_v1_healthmonitor(
            **healthmonitor_create_args)
        self.assertEqual(hm, resultant_hm)
        self.clients("neutron").create_health_monitor.assert_called_once_with(
            expected_hm_data)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "neutron.create_healthmonitor")
def test_list_v1_healthmonitors(self):
hm_list = []
hm_dict = {"health_monitors": hm_list}
self.clients("neutron").list_health_monitors.return_value = hm_dict
return_hm_dict = self.scenario._list_v1_healthmonitors()
self.assertEqual(hm_dict, return_hm_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_healthmonitors")
def test_delete_v1_healthmonitor(self):
healthmonitor = {"health_monitor": {"id": "fake-id"}}
self.scenario._delete_v1_healthmonitor(healthmonitor["health_monitor"])
self.clients("neutron").delete_health_monitor.assert_called_once_with(
healthmonitor["health_monitor"]["id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_healthmonitor")
def test_update_healthmonitor(self):
expected_hm = {"health_monitor": {"admin_state_up": False}}
mock_update = self.clients("neutron").update_health_monitor
mock_update.return_value = expected_hm
hm = {"health_monitor": {"id": "pool-id"}}
healthmonitor_update_args = {"admin_state_up": False}
result_hm = self.scenario._update_v1_healthmonitor(
hm, **healthmonitor_update_args)
self.assertEqual(expected_hm, result_hm)
mock_update.assert_called_once_with(
hm["health_monitor"]["id"], expected_hm)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.update_healthmonitor")
def test_update_loadbalancer_resource(self):
lb = {"id": "1", "provisioning_status": "READY"}
new_lb = {"id": "1", "provisioning_status": "ACTIVE"}
self.clients("neutron").show_loadbalancer.return_value = {
"loadbalancer": new_lb}
return_lb = self.scenario.update_loadbalancer_resource(lb)
self.clients("neutron").show_loadbalancer.assert_called_once_with(
lb["id"])
self.assertEqual(new_lb, return_lb)
def test_update_loadbalancer_resource_not_found(self):
from neutronclient.common import exceptions as n_exceptions
lb = {"id": "1", "provisioning_status": "READY"}
self.clients("neutron").show_loadbalancer.side_effect = (
n_exceptions.NotFound)
self.assertRaises(exceptions.GetResourceNotFound,
self.scenario.update_loadbalancer_resource,
lb)
self.clients("neutron").show_loadbalancer.assert_called_once_with(
lb["id"])
def test_update_loadbalancer_resource_failure(self):
from neutronclient.common import exceptions as n_exceptions
lb = {"id": "1", "provisioning_status": "READY"}
self.clients("neutron").show_loadbalancer.side_effect = (
n_exceptions.Forbidden)
self.assertRaises(exceptions.GetResourceFailure,
self.scenario.update_loadbalancer_resource,
lb)
self.clients("neutron").show_loadbalancer.assert_called_once_with(
lb["id"])
def test__create_lbaasv2_loadbalancer(self):
neutronclient = self.clients("neutron")
create_args = {"name": "s_rally", "vip_subnet_id": "1",
"fake": "fake"}
new_lb = {"id": "1", "provisioning_status": "ACTIVE"}
self.scenario.generate_random_name = mock.Mock(
return_value="s_rally")
self.mock_wait_for_status.mock.return_value = new_lb
return_lb = self.scenario._create_lbaasv2_loadbalancer(
"1", fake="fake")
neutronclient.create_loadbalancer.assert_called_once_with(
{"loadbalancer": create_args})
self.assertEqual(new_lb, return_lb)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_lbaasv2_loadbalancer")
def test__list_lbaasv2_loadbalancers(self):
value = {"loadbalancer": [{"id": "1", "name": "s_rally"}]}
self.clients("neutron").list_loadbalancers.return_value = value
return_value = self.scenario._list_lbaasv2_loadbalancers(
True, fake="fake")
(self.clients("neutron").list_loadbalancers
.assert_called_once_with(True, fake="fake"))
self.assertEqual(value, return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_lbaasv2_loadbalancers")
def test__create_bgpvpn(self, atomic_action=True):
bv = {"bgpvpn": {"id": "bgpvpn-id"}}
self.admin_clients("neutron").create_bgpvpn.return_value = bv
self.scenario.generate_random_name = mock.Mock(
return_value="random_name")
expected_bv_data = {"bgpvpn": {"name": "random_name"}}
resultant_bv = self.scenario._create_bgpvpn()
self.assertEqual(bv, resultant_bv)
self.admin_clients("neutron").create_bgpvpn.assert_called_once_with(
expected_bv_data)
if atomic_action:
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_bgpvpn")
def test_delete_bgpvpn(self):
bgpvpn_create_args = {}
bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_create_args)
self.scenario._delete_bgpvpn(bgpvpn)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_bgpvpn")
def test__list_bgpvpns(self):
bgpvpns_list = []
bgpvpns_dict = {"bgpvpns": bgpvpns_list}
self.admin_clients("neutron").list_bgpvpns.return_value = bgpvpns_dict
return_bgpvpns_list = self.scenario._list_bgpvpns()
self.assertEqual(bgpvpns_list, return_bgpvpns_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_bgpvpns")
    @ddt.data(
        {},
        {"bgpvpn_update_args": {"update_name": True}},
        {"bgpvpn_update_args": {"update_name": False}},
    )
    @ddt.unpack
    def test__update_bgpvpn(self, bgpvpn_update_args=None):
        """Only update_name=True triggers a rename in the request body."""
        expected_bgpvpn = {"bgpvpn": {}}
        bgpvpn_update_data = bgpvpn_update_args or {}
        if bgpvpn_update_data.get("update_name"):
            expected_bgpvpn = {"bgpvpn": {"name": "updated_name"}}
        self.admin_clients(
            "neutron").update_bgpvpn.return_value = expected_bgpvpn
        self.scenario.generate_random_name = mock.Mock(
            return_value="updated_name")
        bgpvpn = {"bgpvpn": {"name": "bgpvpn-name", "id": "bgpvpn-id"}}
        result_bgpvpn = self.scenario._update_bgpvpn(bgpvpn,
                                                     **bgpvpn_update_data)
        self.admin_clients("neutron").update_bgpvpn.assert_called_once_with(
            bgpvpn["bgpvpn"]["id"], expected_bgpvpn)
        self.assertEqual(expected_bgpvpn, result_bgpvpn)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "neutron.update_bgpvpn")
def test__create_bgpvpn_network_assoc(self):
network_id = "network_id"
bgpvpn_id = "bgpvpn_id"
value = {"network_association": {
"network_id": network_id,
"id": bgpvpn_id}}
self.clients(
"neutron").create_bgpvpn_network_assoc.return_value = value
network = {"id": network_id}
bgpvpn = {"bgpvpn": {"id": bgpvpn_id}}
return_value = self.scenario._create_bgpvpn_network_assoc(bgpvpn,
network)
netassoc = {"network_id": network["id"]}
self.clients(
"neutron").create_bgpvpn_network_assoc.assert_called_once_with(
bgpvpn_id, {"network_association": netassoc})
self.assertEqual(return_value, value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_bgpvpn_network_assoc")
def test__create_router_network_assoc(self):
router_id = "router_id"
bgpvpn_id = "bgpvpn_id"
value = {"router_association": {
"router_id": router_id,
"id": "asso_id"}}
self.clients("neutron").create_bgpvpn_router_assoc.return_value = value
router = {"id": router_id}
bgpvpn = {"bgpvpn": {"id": bgpvpn_id}}
return_value = self.scenario._create_bgpvpn_router_assoc(bgpvpn,
router)
router_assoc = {"router_id": router["id"]}
self.clients(
"neutron").create_bgpvpn_router_assoc.assert_called_once_with(
bgpvpn_id, {"router_association": router_assoc})
self.assertEqual(return_value, value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_bgpvpn_router_assoc")
def test__delete_bgpvpn_network_assoc(self):
bgpvpn_assoc_args = {}
asso_id = "aaaa-bbbb"
network_assoc = {"network_association": {"id": asso_id}}
bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args)
self.scenario._delete_bgpvpn_network_assoc(bgpvpn, network_assoc)
self.clients(
"neutron").delete_bgpvpn_network_assoc.assert_called_once_with(
bgpvpn["bgpvpn"]["id"], asso_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_bgpvpn_network_assoc")
def test__delete_bgpvpn_router_assoc(self):
bgpvpn_assoc_args = {}
asso_id = "aaaa-bbbb"
router_assoc = {"router_association": {"id": asso_id}}
bgpvpn = self.scenario._create_bgpvpn(**bgpvpn_assoc_args)
self.scenario._delete_bgpvpn_router_assoc(bgpvpn, router_assoc)
self.clients(
"neutron").delete_bgpvpn_router_assoc.assert_called_once_with(
bgpvpn["bgpvpn"]["id"], asso_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_bgpvpn_router_assoc")
def test__list_bgpvpn_network_assocs(self):
value = {"network_associations": []}
bgpvpn_id = "bgpvpn-id"
bgpvpn = {"bgpvpn": {"id": bgpvpn_id}}
self.clients("neutron").list_bgpvpn_network_assocs.return_value = value
return_asso_list = self.scenario._list_bgpvpn_network_assocs(bgpvpn)
self.clients(
"neutron").list_bgpvpn_network_assocs.assert_called_once_with(
bgpvpn_id)
self.assertEqual(value, return_asso_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_bgpvpn_network_assocs")
def test__list_bgpvpn_router_assocs(self):
value = {"router_associations": []}
bgpvpn_id = "bgpvpn-id"
bgpvpn = {"bgpvpn": {"id": bgpvpn_id}}
self.clients("neutron").list_bgpvpn_router_assocs.return_value = value
return_asso_list = self.scenario._list_bgpvpn_router_assocs(bgpvpn)
self.clients(
"neutron").list_bgpvpn_router_assocs.assert_called_once_with(
bgpvpn_id)
self.assertEqual(value, return_asso_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_bgpvpn_router_assocs")
def test__delete_trunk(self):
trunk_port = {"trunk": {"port_id": "fake-id"}}
self.scenario._delete_trunk(trunk_port["trunk"])
self.clients("neutron").delete_trunk.assert_called_once_with(
trunk_port["trunk"]["port_id"])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.delete_trunk")
def test__create_trunk(self):
port_id = "port-id"
subport_payload = [{"port_id": "subport-port-id",
"segmentation_type": "vlan",
"segmentation_id": 1}]
trunk_payload = {
"port_id": port_id,
"name": self.scenario.generate_random_name.return_value,
"sub_ports": subport_payload
}
expected_trunk_args = {
"trunk": trunk_payload
}
self.scenario._create_trunk(trunk_payload)
self.clients("neutron").create_trunk.assert_called_once_with(
expected_trunk_args)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.create_trunk")
def test__list_trunks(self):
trunks = [{"name": "trunk1"}, {"name": "trunk2"}]
self.clients("neutron").list_trunks.return_value = {"trunks": trunks}
self.assertEqual(trunks, self.scenario._list_trunks())
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_trunks")
def test__list_ports_by_device_id(self):
device_id = "device-id"
self.scenario._list_ports_by_device_id(device_id)
self._nc.list_ports.assert_called_once_with(device_id=device_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_ports")
def test__list_subports_by_trunk(self):
trunk_id = "trunk-id"
self.scenario._list_subports_by_trunk(trunk_id)
self.clients("neutron").trunk_get_subports.assert_called_once_with(
trunk_id)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron.list_subports_by_trunk")
def test__add_subports_to_trunk(self):
trunk_id = "trunk-id"
port_id = "port-id"
subport_payload = [{"port_id": port_id}]
expected_subport_payload = {
"sub_ports": subport_payload
}
self.scenario._add_subports_to_trunk(trunk_id, subport_payload)
self.clients("neutron").trunk_add_subports.assert_called_once_with(
trunk_id, expected_subport_payload)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"neutron._add_subports_to_trunk")
class NeutronScenarioFunctionalTestCase(test.ScenarioTestCase):
    """Checks that exercise real NeutronScenario logic with mocked clients."""

    @mock.patch("%s.generate_cidr" % NET_UTILS)
    def test_functional_create_network_and_subnets(self, mock_generate_cidr):
        """Each created subnet should receive its own generated CIDR."""
        clients = mock.MagicMock()
        scenario = utils.NeutronScenario(context=self.context,
                                         clients=clients)
        network_create_args = {}
        subnet_create_args = {}
        subnets_per_network = 5
        subnet_cidr_start = "1.1.1.0/24"
        # hand out a distinct (ip_version, cidr) pair per generate_cidr call
        cidrs = [(4, "1.1.%d.0/24" % i) for i in range(subnets_per_network)]
        cidrs_ = iter(cidrs)
        mock_generate_cidr.side_effect = lambda **kw: next(cidrs_)
        scenario._create_network_and_subnets(
            network_create_args,
            subnet_create_args,
            subnets_per_network,
            subnet_cidr_start)
        # This checks both data (cidrs seem to be enough) and subnets number
        nc = clients.neutron.return_value
        result_cidrs = sorted(
            (4, arg[0]["subnet"]["cidr"])
            for arg, _kwarg in nc.create_subnet.call_args_list
        )
        self.assertEqual(cidrs, result_cidrs)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,691
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/environment/platforms/existing.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import traceback
from rally.common import cfg
from rally.common import logging
from rally.env import platform
from rally_openstack.common import osclients
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@platform.configure(name="existing", platform="openstack")
class OpenStack(platform.Platform):
    """Default plugin for OpenStack platform

    It may be used to test any existing OpenStack API compatible cloud.
    """

    # A version may be expressed either as a string ("2.0") or number (2).
    VERSION_SCHEMA = {
        "anyOf": [
            {"type": "string", "description": "a string-like version."},
            {"type": "number", "description": "a number-like version."}
        ]
    }

    CONFIG_SCHEMA = {
        "type": "object",
        "definitions": {
            "user": {
                "type": "object",
                "properties": {
                    "username": {"type": "string"},
                    "password": {"type": "string"},
                    "project_name": {"type": "string"},
                    "tenant_name": {"type": "string"},
                    "domain_name": {"type": "string"},
                    "user_domain_name": {"type": "string"},
                    "project_domain_name": {"type": "string"},
                },
                "additionalProperties": False,
                "anyOf": [
                    {
                        "description": "Keystone V2.0 (old-style)",
                        "required": ["username", "password", "tenant_name"]
                    },
                    {
                        "description": "Keystone V3.0 (modern terms)",
                        "required": ["username", "password", "project_name"]
                    }
                ]
            },
            "api_info": {
                "type": "object",
                "patternProperties": {
                    # every service but neutron: only version/service_type
                    "^(?!neutron)([a-z]+)$": {
                        "type": "object",
                        "properties": {
                            "version": VERSION_SCHEMA,
                            "service_type": {"type": "string"}
                        },
                        "minProperties": 1,
                        "additionalProperties": False
                    },
                    # neutron additionally supports the pre_newton flag
                    "^neutron$": {
                        "type": "object",
                        "properties": {
                            "version": VERSION_SCHEMA,
                            "service_type": {"type": "string"},
                            "pre_newton": {
                                "type": "boolean",
                                "description": "Whether Neutron API is older "
                                               "then OpenStack Newton or not. "
                                               "Based on this option, some "
                                               "external fields for "
                                               "identifying resources can be "
                                               "applied."
                            }
                        },
                        "minProperties": 1,
                        "additionalProperties": False
                    }
                },
                "additionalProperties": False
            }
        },
        "properties": {
            "auth_url": {"type": "string"},
            "region_name": {"type": "string"},
            "endpoint": {"type": ["string", "null"]},
            "endpoint_type": {"enum": ["public", "internal", "admin", None]},
            "https_insecure": {"type": "boolean"},
            "https_cacert": {"type": "string"},
            "https_cert": {"type": "string"},
            "https_key": {"type": "string"},
            "profiler_hmac_key": {"type": ["string", "null"]},
            "profiler_conn_str": {"type": ["string", "null"]},
            "admin": {"$ref": "#/definitions/user"},
            "users": {
                "type": "array",
                "items": {"$ref": "#/definitions/user"},
                "minItems": 1
            },
            "api_info": {"$ref": "#/definitions/api_info"}
        },
        "anyOf": [
            {
                "description": "The case when the admin is specified and the "
                               "users can be created via 'users@openstack' "
                               "context or 'existing_users' will be used.",
                "required": ["admin", "auth_url"]},
            {
                "description": "The case when the only existing users are "
                               "specified.",
                "required": ["users", "auth_url"]}
        ],
        "additionalProperties": False
    }

    def create(self):
        """Converts creds of real OpenStack to internal presentation.

        :returns: a tuple of (platform_data, platform_info) where
            platform_data contains normalized "admin"/"users" credentials
            and, optionally, "api_info".
        """
        defaults = {
            "region_name": None,
            "endpoint_type": None,
            "domain_name": None,
            "user_domain_name": cfg.CONF.openstack.user_domain,
            "project_domain_name": cfg.CONF.openstack.project_domain,
            "https_insecure": False,
            "https_cacert": None
        }

        new_data = copy.deepcopy(self.spec)

        if "endpoint" in new_data:
            LOG.warning("endpoint is deprecated and not used.")
            del new_data["endpoint"]

        admin = new_data.pop("admin", None)
        users = new_data.pop("users", [])
        api_info = new_data.pop("api_info", None)

        # Internally everything uses the old "tenant_name" term, so the
        # modern "project_name" key is renamed; the remaining top-level
        # settings (auth_url, region, TLS options, ...) are merged into
        # every credential dict, with sane defaults filled in.
        if admin:
            if "project_name" in admin:
                admin["tenant_name"] = admin.pop("project_name")
            admin.update(new_data)
            for k, v in defaults.items():
                admin.setdefault(k, v)
        for user in users:
            if "project_name" in user:
                user["tenant_name"] = user.pop("project_name")
            user.update(new_data)
            for k, v in defaults.items():
                user.setdefault(k, v)

        platform_data = {"admin": admin, "users": users}
        if api_info:
            platform_data["api_info"] = api_info
        return platform_data, {}

    def destroy(self):
        # NOTE(boris-42): No action need to be performed.
        pass

    def cleanup(self, task_uuid=None):
        # Cleanup of existing clouds is not implemented yet; report an
        # empty (no-op) result so callers get a well-formed summary.
        return {
            "message": "Coming soon!",
            "discovered": 0,
            "deleted": 0,
            "failed": 0,
            "resources": {},
            "errors": []
        }

    def check_health(self):
        """Check whatever platform is alive."""
        # NOTE: take a copy before appending the admin; appending to the
        # stored list directly would grow self.platform_data["users"] on
        # every health check and make the admin appear as a regular user.
        users_to_check = list(self.platform_data["users"])
        if self.platform_data["admin"]:
            users_to_check.append(self.platform_data["admin"])

        clients = None
        for user in users_to_check:
            user["api_info"] = self.platform_data.get("api_info", {})
            try:
                clients = osclients.Clients(user)
                # the admin additionally has to prove it really has the
                # admin role, hence the stricter check
                if self.platform_data["admin"] == user:
                    clients.verified_keystone()
                else:
                    clients.keystone()
            except osclients.exceptions.RallyException as e:
                # all rally native exceptions should provide user-friendly
                # messages
                return {"available": False,
                        "message": e.format_message(),
                        "traceback": traceback.format_exc()}
            except Exception:
                d = copy.deepcopy(user)
                # never leak the password into the health report
                d["password"] = "***"
                if logging.is_debug():
                    LOG.exception("Something unexpected had happened while "
                                  "validating OpenStack credentials.")
                if self.platform_data["admin"] == user:
                    user_role = "admin"
                else:
                    user_role = "user"
                return {
                    "available": False,
                    "message": (
                        "Bad %s creds: \n%s"
                        % (user_role,
                           json.dumps(d, indent=2, sort_keys=True))),
                    "traceback": traceback.format_exc()
                }

        # validate that every explicitly configured API service can
        # actually be reached with the requested version
        for name in self.platform_data.get("api_info", {}):
            if name == "keystone":
                # already validated above through (verified_)keystone()
                continue
            if not hasattr(clients, name):
                return {
                    "available": False,
                    "message": ("There is no OSClient plugin '%s' for"
                                " communicating with OpenStack API."
                                % name)}
            client = getattr(clients, name)
            try:
                client.validate_version(client.choose_version())
                client.create_client()
            except osclients.exceptions.RallyException as e:
                return {
                    "available": False,
                    "message": ("Invalid setting for '%(client)s':"
                                " %(error)s") % {
                        "client": name, "error": e.format_message()}
                }
            except Exception:
                return {
                    "available": False,
                    "message": ("Can not create '%(client)s' with"
                                " %(version)s version.") % {
                        "client": name,
                        "version": client.choose_version()},
                    "traceback": traceback.format_exc()
                }

        return {"available": True}

    def info(self):
        """Return information about cloud as dict."""
        active_user = (self.platform_data["admin"]
                       or self.platform_data["users"][0])
        services = []
        for stype, name in osclients.Clients(active_user).services().items():
            if name == "__unknown__":
                # `__unknown__` name misleads, let's just not include it...
                services.append({"type": stype})
            else:
                services.append({"type": stype, "name": name})

        return {
            "info": {
                "services": sorted(services, key=lambda x: x["type"])
            }
        }

    def _get_validation_context(self):
        # scenarios validated against this platform need tenants/users
        return {"users@openstack": {}}

    @classmethod
    def create_spec_from_sys_environ(cls, sys_environ):
        """Create a spec based on system environment.

        * OS_AUTH_URL - The auth url for OpenStack cluster. Supported both
          versioned and unversioned urls.

        * OS_USERNAME - A user name with admin role to use.

        * OS_PASSWORD - A password for selected user.

        * OS_PROJECT_NAME - Project name to scope to

        * OS_TENANT_NAME - Project name to scope to (an alternative for
          $OS_PROJECT_NAME)

        * OS_USER_DOMAIN_NAME - User domain name (in case of Keystone V3)

        * OS_PROJECT_DOMAIN_NAME - Domain name containing project (in case of
          Keystone V3)

        * OS_ENDPOINT_TYPE - Type of endpoint. Valid endpoint types: admin,
          public, internal

        * OS_INTERFACE - Type of endpoint (an alternative for OS_ENDPOINT_TYPE)

        * OS_REGION_NAME - Authentication region name

        * OS_CACERT - A path to CA certificate bundle file

        * OS_CERT - A path to Client certificate bundle file

        * OS_KEY - A path to Client certificate key file

        * OS_INSECURE - Disable server certificate verification

        * OSPROFILER_HMAC_KEY - HMAC key to use for encrypting context while
          using osprofiler

        * OSPROFILER_CONN_STR - A connection string for OSProfiler collector
          to grep profiling results while building html task reports
        """
        from oslo_utils import strutils

        required_env_vars = ["OS_AUTH_URL", "OS_USERNAME", "OS_PASSWORD"]
        missing_env_vars = [v for v in required_env_vars if
                            v not in sys_environ]
        if missing_env_vars:
            return {"available": False,
                    "message": "The following variable(s) are missed: %s" %
                               missing_env_vars}
        tenant_name = sys_environ.get("OS_PROJECT_NAME",
                                      sys_environ.get("OS_TENANT_NAME"))
        if tenant_name is None:
            return {"available": False,
                    "message": "One of OS_PROJECT_NAME or OS_TENANT_NAME "
                               "should be specified."}

        endpoint_type = sys_environ.get("OS_ENDPOINT_TYPE",
                                        sys_environ.get("OS_INTERFACE"))
        # normalize "publicURL"-style values to plain "public"
        if endpoint_type and "URL" in endpoint_type:
            endpoint_type = endpoint_type.replace("URL", "")

        spec = {
            "auth_url": sys_environ["OS_AUTH_URL"],
            "admin": {
                "username": sys_environ["OS_USERNAME"],
                "password": sys_environ["OS_PASSWORD"],
                "tenant_name": tenant_name
            },
            "endpoint_type": endpoint_type,
            "region_name": sys_environ.get("OS_REGION_NAME", ""),
            "https_cacert": sys_environ.get("OS_CACERT", ""),
            "https_cert": sys_environ.get("OS_CERT", ""),
            "https_key": sys_environ.get("OS_KEY", ""),
            "https_insecure": strutils.bool_from_string(
                sys_environ.get("OS_INSECURE")),
            "profiler_hmac_key": sys_environ.get("OSPROFILER_HMAC_KEY"),
            "profiler_conn_str": sys_environ.get("OSPROFILER_CONN_STR"),
            "api_info": {
                "keystone": {
                    "version": 2,
                    "service_type": "identity"
                }
            }
        }

        user_domain_name = sys_environ.get("OS_USER_DOMAIN_NAME")
        project_domain_name = sys_environ.get("OS_PROJECT_DOMAIN_NAME")
        identity_api_version = sys_environ.get(
            "OS_IDENTITY_API_VERSION", sys_environ.get("IDENTITY_API_VERSION"))

        if (identity_api_version == "3"
                or (identity_api_version is None
                    and (user_domain_name or project_domain_name))):
            # it is Keystone v3 and it has another config scheme
            spec["admin"]["project_name"] = spec["admin"].pop("tenant_name")
            spec["admin"]["user_domain_name"] = user_domain_name or "Default"
            project_domain_name = project_domain_name or "Default"
            spec["admin"]["project_domain_name"] = project_domain_name

            spec["api_info"] = {
                "keystone": {
                    "version": 3,
                    "service_type": "identityv3"
                }
            }

        return {"spec": spec, "available": True, "message": "Available"}

    @classmethod
    def _get_doc(cls):
        # combine the class docstring with the env-var reference taken from
        # create_spec_from_sys_environ so the docs stay in one place
        doc = cls.__doc__.strip()
        env_vars_docs = cls.create_spec_from_sys_environ.__doc__
        env_vars_description = "\n".join(
            line for line in env_vars_docs.split("\n")[1:]
        )
        doc += (f"\n **The following environment variables are expected for "
                f"creating a Rally environment using system environment "
                f"variables**\n{env_vars_description}")
        return doc
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,692
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/storage/test_block.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.common.services.storage import block
from tests.unit import test
class BlockTestCase(test.TestCase):
    """Unit tests for the BlockStorage service facade.

    Every test follows the same delegation pattern: call the public method
    on the facade and assert that it forwards (with normalized arguments)
    to the mocked implementation and returns its result.
    """

    def setUp(self):
        super(BlockTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.service = self._get_service_with_fake_impl()

    def _get_service_with_fake_impl(self):
        # patch discover_impl so no real cinder client is ever created
        path = "rally_openstack.common.services.storage.block"
        path = "%s.BlockStorage.discover_impl" % path
        with mock.patch(path) as mock_discover:
            mock_discover.return_value = mock.MagicMock(), None
            service = block.BlockStorage(self.clients)
        return service

    def test_create_volume(self):
        self.assertEqual(self.service._impl.create_volume.return_value,
                         self.service.create_volume("fake_volume"))
        self.service._impl.create_volume.assert_called_once_with(
            "fake_volume", availability_zone=None, consistencygroup_id=None,
            description=None, group_id=None, imageRef=None, metadata=None,
            name=None, project_id=None,
            scheduler_hints=None, snapshot_id=None,
            source_volid=None, user_id=None, volume_type=None, backup_id=None)

    def test_list_volumes(self):
        self.assertEqual(self.service._impl.list_volumes.return_value,
                         self.service.list_volumes(detailed=True))
        self.service._impl.list_volumes.assert_called_once_with(
            detailed=True, limit=None, marker=None, search_opts=None,
            sort=None)

    def test_get_volume(self):
        # NOTE: assertTrue(a, b) treats "b" as the failure message and
        # asserts nothing about equality; assertEqual is what was intended.
        self.assertEqual(self.service._impl.get_volume.return_value,
                         self.service.get_volume(1))
        self.service._impl.get_volume.assert_called_once_with(1)

    def test_update_volume(self):
        # same assertTrue -> assertEqual fix as in test_get_volume
        self.assertEqual(self.service._impl.update_volume.return_value,
                         self.service.update_volume(1, name="name",
                                                    description="desp"))
        self.service._impl.update_volume.assert_called_once_with(
            1, name="name", description="desp")

    def test_delete_volume(self):
        self.service.delete_volume("volume")
        self.service._impl.delete_volume.assert_called_once_with("volume")

    def test_extend_volume(self):
        self.assertEqual(self.service._impl.extend_volume.return_value,
                         self.service.extend_volume("volume", new_size=1))
        self.service._impl.extend_volume.assert_called_once_with("volume",
                                                                 new_size=1)

    def test_list_snapshots(self):
        self.assertEqual(self.service._impl.list_snapshots.return_value,
                         self.service.list_snapshots(detailed=True))
        self.service._impl.list_snapshots.assert_called_once_with(
            detailed=True)

    def test_list_types(self):
        self.assertEqual(
            self.service._impl.list_types.return_value,
            self.service.list_types(search_opts=None, is_public=None))
        self.service._impl.list_types.assert_called_once_with(is_public=None,
                                                              search_opts=None)

    def test_set_metadata(self):
        self.assertEqual(
            self.service._impl.set_metadata.return_value,
            self.service.set_metadata("volume", sets=10, set_size=3))
        self.service._impl.set_metadata.assert_called_once_with(
            "volume", set_size=3, sets=10)

    def test_delete_metadata(self):
        keys = ["a", "b"]
        self.service.delete_metadata("volume", keys=keys, deletes=10,
                                     delete_size=3)
        self.service._impl.delete_metadata.assert_called_once_with(
            "volume", keys, delete_size=3, deletes=10)

    def test_update_readonly_flag(self):
        self.assertEqual(
            self.service._impl.update_readonly_flag.return_value,
            self.service.update_readonly_flag("volume", read_only=True))
        self.service._impl.update_readonly_flag.assert_called_once_with(
            "volume", read_only=True)

    def test_upload_volume_to_image(self):
        self.assertEqual(
            self.service._impl.upload_volume_to_image.return_value,
            self.service.upload_volume_to_image("volume",
                                                force=False,
                                                container_format="bare",
                                                disk_format="raw"))
        self.service._impl.upload_volume_to_image.assert_called_once_with(
            "volume", container_format="bare", disk_format="raw", force=False)

    def test_create_qos(self):
        spaces = {"consumer": "both",
                  "write_iops_sec": "10",
                  "read_iops_sec": "1000"}
        self.assertEqual(
            self.service._impl.create_qos.return_value,
            self.service.create_qos(spaces)
        )
        self.service._impl.create_qos.assert_called_once_with(spaces)

    def test_list_qos(self):
        self.assertEqual(
            self.service._impl.list_qos.return_value,
            self.service.list_qos(True)
        )
        self.service._impl.list_qos.assert_called_once_with(True)

    def test_get_qos(self):
        self.assertEqual(
            self.service._impl.get_qos.return_value,
            self.service.get_qos("qos"))
        self.service._impl.get_qos.assert_called_once_with("qos")

    def test_set_qos(self):
        set_specs_args = {"test": "foo"}
        self.assertEqual(
            self.service._impl.set_qos.return_value,
            self.service.set_qos(qos="qos", set_specs_args=set_specs_args))
        self.service._impl.set_qos.assert_called_once_with(
            qos="qos", set_specs_args=set_specs_args)

    def test_qos_associate_type(self):
        self.assertEqual(
            self.service._impl.qos_associate_type.return_value,
            self.service.qos_associate_type(qos_specs="fake_qos",
                                            volume_type="fake_type"))
        self.service._impl.qos_associate_type.assert_called_once_with(
            "fake_qos", "fake_type")

    def test_qos_disassociate_type(self):
        self.assertEqual(
            self.service._impl.qos_disassociate_type.return_value,
            self.service.qos_disassociate_type(qos_specs="fake_qos",
                                               volume_type="fake_type"))
        self.service._impl.qos_disassociate_type.assert_called_once_with(
            "fake_qos", "fake_type")

    def test_create_snapshot(self):
        self.assertEqual(
            self.service._impl.create_snapshot.return_value,
            self.service.create_snapshot(1, force=False, name=None,
                                         description=None, metadata=None))
        self.service._impl.create_snapshot.assert_called_once_with(
            1, force=False, name=None, description=None, metadata=None)

    def test_delete_snapshot(self):
        self.service.delete_snapshot("snapshot")
        self.service._impl.delete_snapshot.assert_called_once_with("snapshot")

    def test_create_backup(self):
        self.assertEqual(
            self.service._impl.create_backup.return_value,
            self.service.create_backup(1, container=None,
                                       name=None, description=None,
                                       incremental=False, force=False,
                                       snapshot_id=None))
        self.service._impl.create_backup.assert_called_once_with(
            1, container=None, name=None, description=None, incremental=False,
            force=False, snapshot_id=None)

    def test_delete_backup(self):
        self.service.delete_backup("backup")
        self.service._impl.delete_backup.assert_called_once_with("backup")

    def test_restore_backup(self):
        self.assertEqual(self.service._impl.restore_backup.return_value,
                         self.service.restore_backup(1, volume_id=1))
        self.service._impl.restore_backup.assert_called_once_with(
            1, volume_id=1)

    def test_list_backups(self):
        self.assertEqual(self.service._impl.list_backups.return_value,
                         self.service.list_backups(detailed=True))
        self.service._impl.list_backups.assert_called_once_with(detailed=True)

    def test_list_transfers(self):
        self.assertEqual(
            self.service._impl.list_transfers.return_value,
            self.service.list_transfers(detailed=True, search_opts=None))
        self.service._impl.list_transfers.assert_called_once_with(
            detailed=True, search_opts=None)

    def test_create_volume_type(self):
        self.assertEqual(
            self.service._impl.create_volume_type.return_value,
            self.service.create_volume_type(name="type",
                                            description=None,
                                            is_public=True))
        self.service._impl.create_volume_type.assert_called_once_with(
            name="type", description=None, is_public=True)

    def test_get_volume_type(self):
        self.assertEqual(
            self.service._impl.get_volume_type.return_value,
            self.service.get_volume_type("volume_type"))
        self.service._impl.get_volume_type.assert_called_once_with(
            "volume_type")

    def test_delete_volume_type(self):
        self.service.delete_volume_type("volume_type")
        self.service._impl.delete_volume_type.assert_called_once_with(
            "volume_type")

    def test_set_volume_type_keys(self):
        self.assertEqual(
            self.service._impl.set_volume_type_keys.return_value,
            self.service.set_volume_type_keys("volume_type",
                                              metadata="metadata"))
        self.service._impl.set_volume_type_keys.assert_called_once_with(
            "volume_type", "metadata")

    def test_transfer_create(self):
        self.assertEqual(self.service._impl.transfer_create.return_value,
                         self.service.transfer_create(1, name="t"))
        self.service._impl.transfer_create.assert_called_once_with(
            1, name="t")

    def test_transfer_accept(self):
        self.assertEqual(self.service._impl.transfer_accept.return_value,
                         self.service.transfer_accept(1, auth_key=2))
        self.service._impl.transfer_accept.assert_called_once_with(
            1, auth_key=2)

    def test_create_encryption_type(self):
        self.assertEqual(
            self.service._impl.create_encryption_type.return_value,
            self.service.create_encryption_type("type", specs=2))
        self.service._impl.create_encryption_type.assert_called_once_with(
            "type", specs=2)

    def test_get_encryption_type(self):
        self.assertEqual(
            self.service._impl.get_encryption_type.return_value,
            self.service.get_encryption_type("type"))
        self.service._impl.get_encryption_type.assert_called_once_with(
            "type")

    def test_list_encryption_type(self):
        self.assertEqual(self.service._impl.list_encryption_type.return_value,
                         self.service.list_encryption_type(search_opts=None))
        self.service._impl.list_encryption_type.assert_called_once_with(
            search_opts=None)

    def test_delete_encryption_type(self):
        self.service.delete_encryption_type("type")
        self.service._impl.delete_encryption_type.assert_called_once_with(
            "type")

    def test_update_encryption_type(self):
        self.assertEqual(
            self.service._impl.update_encryption_type.return_value,
            self.service.update_encryption_type("type", specs=3))
        self.service._impl.update_encryption_type.assert_called_once_with(
            "type", specs=3)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,693
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/sahara/sahara_input_data_sources.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from urllib.parse import urlparse
import requests
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.sahara import utils
from rally_openstack.task.scenarios.swift import utils as swift_utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="sahara_input_data_sources", platform="openstack",
                   order=443)
class SaharaInputDataSources(context.OpenStackContext):
    """Context class for setting up Input Data Sources for an EDP job."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "input_type": {
                "enum": ["swift", "hdfs"],
            },
            "input_url": {
                "type": "string",
            },
            "swift_files": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": {
                            "type": "string"
                        },
                        "download_url": {
                            "type": "string"
                        }
                    },
                    "additionalProperties": False,
                    "required": ["name", "download_url"]
                }
            }
        },
        "additionalProperties": False,
        "required": ["input_type", "input_url"]
    }

    def setup(self):
        """Create an input data source per tenant.

        For "swift" inputs the referenced files are downloaded and uploaded
        into a freshly created container first; for "hdfs" only the data
        source record is registered in sahara.
        """
        utils.init_sahara_context(self)
        self.context["sahara"]["swift_objects"] = []
        self.context["sahara"]["container_name"] = None

        for user, tenant_id in self._iterate_per_tenants():
            clients = osclients.Clients(user["credential"])
            if self.config["input_type"] == "swift":
                # "swift_files" is optional in CONFIG_SCHEMA, so default to
                # an empty list instead of raising KeyError
                self.setup_inputs_swift(clients, tenant_id,
                                        self.config["input_url"],
                                        self.config.get("swift_files", []),
                                        user["credential"].username,
                                        user["credential"].password)
            else:
                self.setup_inputs(clients, tenant_id,
                                  self.config["input_type"],
                                  self.config["input_url"])

    def setup_inputs(self, clients, tenant_id, input_type, input_url):
        """Register a plain (non-swift) input data source for a tenant."""
        input_ds = clients.sahara().data_sources.create(
            name=self.generate_random_name(),
            description="",
            data_source_type=input_type,
            url=input_url)

        self.context["tenants"][tenant_id]["sahara"]["input"] = input_ds.id

    def setup_inputs_swift(self, clients, tenant_id, input_url,
                           swift_files, username, password):
        """Upload input files to swift and register a swift data source."""
        swift_scenario = swift_utils.SwiftScenario(clients=clients,
                                                   context=self.context)
        # TODO(astudenov): use self.generate_random_name()
        # NOTE: str.rstrip(".sahara") strips any trailing characters from
        # the set {'.', 's', 'a', 'h', 'r'} (e.g. "data.sahara" -> "dat"),
        # not the ".sahara" suffix; remove the literal suffix instead.
        netloc = urlparse(input_url).netloc
        if netloc.endswith(".sahara"):
            netloc = netloc[:-len(".sahara")]
        container_name = "rally_" + netloc
        self.context["sahara"]["container_name"] = (
            swift_scenario._create_container(container_name=container_name))

        for swift_file in swift_files:
            content = requests.get(swift_file["download_url"]).content
            self.context["sahara"]["swift_objects"].append(
                swift_scenario._upload_object(
                    self.context["sahara"]["container_name"], content,
                    object_name=swift_file["name"]))

        input_ds_swift = clients.sahara().data_sources.create(
            name=self.generate_random_name(), description="",
            data_source_type="swift", url=input_url,
            credential_user=username, credential_pass=password)

        self.context["tenants"][tenant_id]["sahara"]["input"] = (
            input_ds_swift.id)

    def cleanup(self):
        """Delete the swift objects/containers and sahara data sources."""
        resource_manager.cleanup(
            names=["swift.object", "swift.container"],
            users=self.context.get("users", []),
            superclass=swift_utils.SwiftScenario,
            task_id=self.get_owner_id())
        resource_manager.cleanup(
            names=["sahara.data_sources"],
            users=self.context.get("users", []),
            superclass=self.__class__,
            task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,694
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/network/net_utils.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from rally.common import logging
from rally.common import utils
LOG = logging.getLogger(__name__)
_IPv4_START_CIDR = "10.2.0.0/24"
_IPv6_START_CIDR = "dead:beaf::/64"
_IPv4_CIDR_INCR = utils.RAMInt()
_IPv6_CIDR_INCR = utils.RAMInt()
def get_ip_version(ip):
    """Return the IP protocol version (4 or 6) of a CIDR/address string."""
    network = netaddr.IPNetwork(ip)
    return network.version
def generate_cidr(ip_version=None, start_cidr=None):
    """Generate next CIDR for network or subnet, without IP overlapping.

    This is process and thread safe, because `cidr_incr' points to
    value stored directly in RAM. This guarantees that CIDRs will be
    serial and unique even under hard multiprocessing/threading load.

    :param ip_version: version of IP to take default value for start_cidr
    :param start_cidr: start CIDR str
    :returns: tuple of (ip_version, cidr string)
    """
    if start_cidr is None:
        # no explicit starting point: pick the module-level default for the
        # requested IP version (IPv4 when ip_version is None or anything
        # other than 6)
        if ip_version == 6:
            start_cidr = _IPv6_START_CIDR
        else:
            start_cidr = _IPv4_START_CIDR
    # start_cidr wins over ip_version: the actual version is derived from it
    ip_version = get_ip_version(start_cidr)
    # each version keeps its own global RAM-backed counter, so concurrent
    # callers always advance to distinct, non-overlapping networks
    if ip_version == 4:
        cidr = str(netaddr.IPNetwork(start_cidr).next(next(_IPv4_CIDR_INCR)))
    else:
        cidr = str(netaddr.IPNetwork(start_cidr).next(next(_IPv6_CIDR_INCR)))
    LOG.debug("CIDR generated: %s" % cidr)
    return ip_version, cidr
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,695
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/murano/utils.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
import zipfile
from rally.common import cfg
from rally.common import utils as common_utils
from rally.task import atomic
from rally.task import utils
import yaml
from rally_openstack.task import scenario
CONF = cfg.CONF
def pack_dir(source_directory, zip_name=None):
    """Archive content of the directory into .zip

    Zip content of the source folder excluding root directory
    into zip archive. When zip_name is specified, it would be used
    as a destination for the archive. Otherwise method would
    try to use temporary file as a destination for the archive.

    :param source_directory: root of the newly created archive.
        Directory is added recursively.
    :param zip_name: destination zip file name.
    :raises IOError: whenever there are IO issues.
    :returns: path to the newly created zip archive either specified via
        zip_name or a temporary one.
    """
    if not zip_name:
        # Only the generated file name is needed here; close the handle
        # immediately instead of leaking a file descriptor (leaving it
        # open would also prevent re-opening the file on Windows).
        fp = tempfile.NamedTemporaryFile(delete=False)
        fp.close()
        zip_name = fp.name
    # The context manager guarantees the archive is finalized even if
    # os.walk() or write() raises.
    with zipfile.ZipFile(zip_name, mode="w") as zipf:
        for root, _dirs, files in os.walk(source_directory):
            for f in files:
                abspath = os.path.join(root, f)
                # Store paths relative to source_directory so the root
                # directory itself is excluded from the archive.
                relpath = os.path.relpath(abspath, source_directory)
                zipf.write(abspath, relpath)
    return zip_name
class MuranoScenario(scenario.OpenStackScenario):
    """Base class for Murano scenarios with basic atomic actions."""

    @atomic.action_timer("murano.list_environments")
    def _list_environments(self):
        """Return environments list."""
        return self.clients("murano").environments.list()

    @atomic.action_timer("murano.create_environment")
    def _create_environment(self):
        """Create environment.

        The environment name is generated automatically.

        :returns: Environment instance
        """
        env_name = self.generate_random_name()
        return self.clients("murano").environments.create({"name": env_name})

    @atomic.action_timer("murano.delete_environment")
    def _delete_environment(self, environment):
        """Delete given environment.

        Return when the environment is actually deleted.

        :param environment: Environment instance
        """
        self.clients("murano").environments.delete(environment.id)

    @atomic.action_timer("murano.create_session")
    def _create_session(self, environment_id):
        """Create session for environment with specific id

        :param environment_id: Environment id
        :returns: Session instance
        """
        return self.clients("murano").sessions.configure(environment_id)

    @atomic.action_timer("murano.create_service")
    def _create_service(self, environment, session, full_package_name,
                        image_name=None, flavor_name=None):
        """Create Murano service.

        :param environment: Environment instance
        :param session: Session instance
        :param full_package_name: full name of the Murano package
        :param image_name: Image name
        :param flavor_name: Flavor name
        :returns: Service instance
        """
        app_id = str(uuid.uuid4())
        data = {"?": {"id": app_id,
                      "type": full_package_name},
                "name": self.generate_random_name()}
        return self.clients("murano").services.post(
            environment_id=environment.id, path="/", data=data,
            session_id=session.id)

    @atomic.action_timer("murano.deploy_environment")
    def _deploy_environment(self, environment, session):
        """Deploy environment.

        Blocks until the environment reaches the READY status or the
        configured timeout expires.

        :param environment: Environment instance
        :param session: Session instance
        """
        self.clients("murano").sessions.deploy(environment.id,
                                               session.id)
        config = CONF.openstack
        utils.wait_for_status(
            environment,
            ready_statuses=["READY"],
            update_resource=utils.get_from_manager(["DEPLOY FAILURE"]),
            timeout=config.murano_deploy_environment_timeout,
            check_interval=config.murano_deploy_environment_check_interval
        )

    @atomic.action_timer("murano.list_packages")
    def _list_packages(self, include_disabled=False):
        """Returns packages list.

        :param include_disabled: if "True" then disabled packages will be
                                 included in the result.
                                 Default value is False.
        :returns: list of imported packages
        """
        return self.clients("murano").packages.list(
            include_disabled=include_disabled)

    @atomic.action_timer("murano.import_package")
    def _import_package(self, package):
        """Import package to the Murano.

        :param package: path to zip archive with Murano application
        :returns: imported package
        """
        # NOTE: zip archives are binary, so the file must be opened in
        # binary mode (text mode would fail to decode the content on
        # Python 3); the context manager also ensures the handle is
        # closed after the upload instead of being leaked.
        with open(package, "rb") as package_file:
            return self.clients("murano").packages.create(
                {}, {"file": package_file}
            )

    @atomic.action_timer("murano.delete_package")
    def _delete_package(self, package):
        """Delete specified package.

        :param package: package that will be deleted
        """
        self.clients("murano").packages.delete(package.id)

    @atomic.action_timer("murano.update_package")
    def _update_package(self, package, body, operation="replace"):
        """Update specified package.

        :param package: package that will be updated
        :param body: dict object that defines what package property will be
                     updated, e.g {"tags": ["tag"]} or {"enabled": "true"}
        :param operation: string object that defines the way of how package
                          property will be updated, allowed operations are
                          "add", "replace" or "delete".
                          Default value is "replace".
        :returns: updated package
        """
        return self.clients("murano").packages.update(
            package.id, body, operation)

    @atomic.action_timer("murano.filter_applications")
    def _filter_applications(self, filter_query):
        """Filter list of uploaded application by specified criteria.

        :param filter_query: dict that contains filter criteria, it
                             will be passed as **kwargs to filter method
                             e.g. {"category": "Web"}
        :returns: filtered list of packages
        """
        return self.clients("murano").packages.filter(**filter_query)

    def _zip_package(self, package_path):
        """Call _prepare_package method that returns path to zip archive."""
        return MuranoPackageManager(self.task)._prepare_package(package_path)
class MuranoPackageManager(common_utils.RandomNameGeneratorMixin):
    RESOURCE_NAME_FORMAT = "app.rally_XXXXXXXX_XXXXXXXX"

    def __init__(self, task):
        self.task = task

    @staticmethod
    def _read_from_file(filename):
        """Load and return the YAML content of *filename*."""
        with open(filename, "r") as stream:
            return yaml.safe_load(stream.read())

    @staticmethod
    def _write_to_file(data, filename):
        """Serialize *data* as YAML into *filename*."""
        with open(filename, "w") as stream:
            yaml.safe_dump(data, stream)

    def _change_app_fullname(self, app_dir):
        """Change application full name.

        To avoid name conflict error during package import (when user
        tries to import a few packages into the same tenant) need to change
        the application name. For doing this need to replace following
        parts in manifest.yaml

        from
          ...
          FullName: app.name
          ...
          Classes:
            app.name: app_class.yaml

        to:
          ...
          FullName: <new_name>
          ...
          Classes:
            <new_name>: app_class.yaml

        :param app_dir: path to directory with Murano application context
        """
        manifest_file = os.path.join(app_dir, "manifest.yaml")
        manifest = self._read_from_file(manifest_file)
        fresh_name = self.generate_random_name()
        # re-key the Classes mapping and rename the application itself
        manifest["Classes"][fresh_name] = manifest["Classes"].pop(
            manifest["FullName"])
        manifest["FullName"] = fresh_name
        self._write_to_file(manifest, manifest_file)

    def _prepare_package(self, package_path):
        """Check whether the package path is path to zip archive or not.

        If package_path is not a path to zip archive but path to Murano
        application folder, than method prepares zip archive with Murano
        application. It copies directory with Murano app files to temporary
        folder, changes manifest.yaml and class file (to avoid '409 Conflict'
        errors in Murano) and prepares zip package.

        :param package_path: path to zip archive or directory with package
                             components
        :returns: path to zip archive with Murano application
        """
        if zipfile.is_zipfile(package_path):
            # already an archive -- nothing to prepare
            return package_path
        scratch_dir = tempfile.mkdtemp()
        app_copy = os.path.join(scratch_dir, "package/")
        try:
            shutil.copytree(os.path.expanduser(package_path), app_copy)
            self._change_app_fullname(app_copy)
            package_path = pack_dir(app_copy)
        finally:
            shutil.rmtree(scratch_dir)
        return package_path
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,696
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/nova/keypairs.py
|
# Copyright 2014: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import osclients
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="keypair", platform="openstack", order=310)
class Keypair(context.OpenStackContext):
    """Create Nova KeyPair for each user."""

    # NOTE(andreykurilin): "type" != "null", since we need to support backward
    #   compatibility (previously an empty dict was valid) and I hope in near
    #   future, we will extend this context to accept keys.
    CONFIG_SCHEMA = {"type": "object",
                     "additionalProperties": False}

    def _generate_keypair(self, credential):
        """Create a keypair whose name does not collide with existing ones."""
        nova_client = osclients.Clients(credential).nova()
        # NOTE(hughsaunders): If keypair exists, it should re-generate name.
        taken_names = {kp.name for kp in nova_client.keypairs.list()}
        keypair_name = self.generate_random_name()
        while keypair_name in taken_names:
            keypair_name = self.generate_random_name()
        keypair = nova_client.keypairs.create(keypair_name)
        return {"private": keypair.private_key,
                "public": keypair.public_key,
                "name": keypair_name,
                "id": keypair.id}

    def setup(self):
        """Attach a freshly generated keypair to every user in the context."""
        for user in self.context["users"]:
            user["keypair"] = self._generate_keypair(user["credential"])

    def cleanup(self):
        """Delete all keypairs created by this context."""
        resource_manager.cleanup(names=["nova.keypairs"],
                                 users=self.context.get("users", []),
                                 superclass=self.__class__,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,697
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/barbican/containers.py
|
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.barbican import utils
"""Scenarios for Barbican containers."""
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanContainers.list")
class BarbicanContainersList(utils.BarbicanBase):
    def run(self):
        """List containers using the admin Barbican client."""
        self.admin_barbican.list_container()
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanContainers.create_and_delete")
class BarbicanContainersGenericCreateAndDelete(utils.BarbicanBase):
    def run(self):
        """Create a generic container, then delete it again."""
        created = self.admin_barbican.container_create()
        self.admin_barbican.container_delete(created.container_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanContainers.create_and_add")
class BarbicanContainersGenericCreateAndAddSecret(utils.BarbicanBase):
    def run(self):
        """Create a secret, wrap it into a generic container, delete it."""
        secrets = {"secret": self.admin_barbican.create_secret()}
        created = self.admin_barbican.container_create(secrets=secrets)
        self.admin_barbican.container_delete(created.container_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanContainers.create_certificate_and_delete")
class BarbicanContainersCertificateCreateAndDelete(utils.BarbicanBase):
    def run(self):
        """Create a certificate container, then delete it again."""
        created = self.admin_barbican.create_certificate_container()
        self.admin_barbican.container_delete(created.container_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanContainers.create_rsa_and_delete")
class BarbicanContainersRSACreateAndDelete(utils.BarbicanBase):
    def run(self):
        # NOTE: the previous docstring wrongly said "certificate container";
        # this scenario creates an RSA container.
        """Create and delete RSA container."""
        container = self.admin_barbican.create_rsa_container()
        self.admin_barbican.container_delete(container.container_ref)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,698
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/sahara/consts.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Mapping: Sahara plugin name -> plugin version -> node-group role
# ("master"/"worker"/"manager"/"master-edp") -> list of processes that
# should run in node groups of that role.
NODE_PROCESSES = {
    "vanilla": {
        "1.2.1": {
            "master": ["namenode", "jobtracker", "oozie"],
            "worker": ["datanode", "tasktracker"]
        },
        "2.3.0": {
            "master": ["namenode", "resourcemanager", "historyserver",
                       "oozie"],
            "worker": ["datanode", "nodemanager"]
        },
        "2.4.1": {
            "master": ["namenode", "resourcemanager", "historyserver",
                       "oozie"],
            "worker": ["datanode", "nodemanager"]
        },
        "2.6.0": {
            "master": ["namenode", "resourcemanager", "historyserver",
                       "oozie"],
            "worker": ["datanode", "nodemanager"]
        },
        "2.7.1": {
            "master": ["namenode", "resourcemanager", "historyserver",
                       "oozie"],
            "worker": ["datanode", "nodemanager"]
        }
    },
    "hdp": {
        "1.3.2": {
            "master": ["JOBTRACKER", "NAMENODE", "SECONDARY_NAMENODE",
                       "GANGLIA_SERVER", "NAGIOS_SERVER",
                       "AMBARI_SERVER", "OOZIE_SERVER"],
            "worker": ["TASKTRACKER", "DATANODE", "HDFS_CLIENT",
                       "MAPREDUCE_CLIENT", "OOZIE_CLIENT", "PIG"]
        },
        "2.0.6": {
            "manager": ["AMBARI_SERVER", "GANGLIA_SERVER",
                        "NAGIOS_SERVER"],
            "master": ["NAMENODE", "SECONDARY_NAMENODE",
                       "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT",
                       "HISTORYSERVER", "RESOURCEMANAGER",
                       "OOZIE_SERVER"],
            "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT",
                       "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT",
                       "NODEMANAGER", "OOZIE_CLIENT"]
        },
        "2.2": {
            "manager": ["AMBARI_SERVER", "GANGLIA_SERVER",
                        "NAGIOS_SERVER"],
            "master": ["NAMENODE", "SECONDARY_NAMENODE",
                       "ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT",
                       "HISTORYSERVER", "RESOURCEMANAGER",
                       "OOZIE_SERVER"],
            "worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT",
                       "PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT",
                       "NODEMANAGER", "OOZIE_CLIENT", "TEZ_CLIENT"]
        }
    },
    "cdh": {
        "5": {
            "manager": ["CLOUDERA_MANAGER"],
            "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER",
                       "OOZIE_SERVER", "YARN_JOBHISTORY",
                       "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE",
                       "HIVE_SERVER2"],
            "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"]
        },
        "5.4.0": {
            "manager": ["CLOUDERA_MANAGER"],
            "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER",
                       "OOZIE_SERVER", "YARN_JOBHISTORY",
                       "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE",
                       "HIVE_SERVER2"],
            "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"]
        },
        "5.5.0": {
            "manager": ["CLOUDERA_MANAGER"],
            "master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER",
                       "OOZIE_SERVER", "YARN_JOBHISTORY",
                       "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE",
                       "HIVE_SERVER2"],
            "worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"]
        }
    },
    "spark": {
        "1.3.1": {
            "master": ["namenode", "master"],
            "worker": ["datanode", "slave"]
        },
        "1.6.0": {
            "master": ["namenode", "master"],
            "worker": ["datanode", "slave"]
        }
    },
    "ambari": {
        "2.3": {
            "master-edp": ["Hive Metastore", "HiveServer", "Oozie"],
            "master": ["Ambari", "MapReduce History Server",
                       "Spark History Server", "NameNode", "ResourceManager",
                       "SecondaryNameNode", "YARN Timeline Server",
                       "ZooKeeper"],
            "worker": ["DataNode", "NodeManager"]
        }
    },
    "mapr": {
        "5.0.0.mrv2": {
            "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS",
                       "Oozie", "FileServer", "CLDB", "Flume", "Hue",
                       "NodeManager", "HistoryServer", "ResourseManager",
                       "HiveServer2", "HiveMetastore", "Sqoop2-Client",
                       "Sqoop2-Server"],
            "worker": ["NodeManager", "FileServer"]
        },
        "5.1.0.mrv2": {
            "master": ["Metrics", "Webserver", "Zookeeper", "HTTPFS",
                       "Oozie", "FileServer", "CLDB", "Flume", "Hue",
                       "NodeManager", "HistoryServer", "ResourseManager",
                       "HiveServer2", "HiveMetastore", "Sqoop2-Client",
                       "Sqoop2-Server"],
            "worker": ["NodeManager", "FileServer"]
        }
    }
}
# Mapping: Sahara plugin name -> plugin version -> HDFS replication
# configuration ("target" is the service section the option belongs to,
# "config_name" is the option key; note the key spelling differs per
# plugin family: "dfs.replication" vs "dfs_replication").
REPLICATION_CONFIGS = {
    "vanilla": {
        "1.2.1": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.3.0": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.4.1": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.6.0": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.7.1": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        }
    },
    "hdp": {
        "1.3.2": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.0.6": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "2.2": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        }
    },
    "cdh": {
        "5": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        },
        "5.4.0": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        },
        "5.5.0": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        }
    },
    "spark": {
        "1.3.1": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        },
        "1.6.0": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        }
    },
    "ambari": {
        "2.3": {
            "target": "HDFS",
            "config_name": "dfs_replication"
        }
    },
    "mapr": {
        "5.0.0.mrv2": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        },
        "5.1.0.mrv2": {
            "target": "HDFS",
            "config_name": "dfs.replication"
        }
    }
}
# Mapping: Sahara plugin name -> plugin version -> list of processes for
# which anti-affinity scheduling should be requested (data-node-like
# processes, so replicas land on distinct hosts).
ANTI_AFFINITY_PROCESSES = {
    "vanilla": {
        "1.2.1": ["datanode"],
        "2.3.0": ["datanode"],
        "2.4.1": ["datanode"],
        "2.6.0": ["datanode"],
        "2.7.1": ["datanode"]
    },
    "hdp": {
        "1.3.2": ["DATANODE"],
        "2.0.6": ["DATANODE"],
        "2.2": ["DATANODE"]
    },
    "cdh": {
        "5": ["HDFS_DATANODE"],
        "5.4.0": ["HDFS_DATANODE"],
        "5.5.0": ["HDFS_DATANODE"]
    },
    "spark": {
        "1.3.1": ["datanode"],
        "1.6.0": ["datanode"]
    },
    "ambari": {
        "2.3": ["DataNode"],
    },
    "mapr": {
        "5.0.0.mrv2": ["FileServer"],
        "5.1.0.mrv2": ["FileServer"],
    }
}
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,699
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/magnum/test_cluster_templates.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.magnum import cluster_templates
from rally_openstack.task.scenarios.magnum import utils as magnum_utils
from tests.unit import fakes
from tests.unit import test
BASE_CTX = "rally.task.context"
CTX = "rally_openstack.task.contexts"
BASE_SCN = "rally.task.scenarios"
SCN = "rally_openstack.task.scenarios"
class ClusterTemplatesGeneratorTestCase(test.ScenarioTestCase):
    """Tests for the magnum cluster_templates context generator."""

    def _gen_tenants(self, count):
        """Generate *count* fake tenants keyed by stringified index."""
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = dict(name=str(id_))
        return tenants

    @mock.patch("%s.magnum.utils.MagnumScenario."
                "_create_cluster_template" % SCN,
                return_value=fakes.FakeClusterTemplate(id="uuid"))
    def test_setup(self, mock__create_cluster_template):
        tenants_count = 2
        users_per_tenant = 5
        tenants = self._gen_tenants(tenants_count)
        users = []
        for ten_id in tenants:
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": ten_id,
                              "credential": mock.MagicMock()})
        self.context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "cluster_templates": {
                    "dns_nameserver": "8.8.8.8",
                    "external_network_id": "public",
                    "flavor_id": "m1.small",
                    "docker_volume_size": 5,
                    "coe": "kubernetes",
                    "image_id": "fedora-atomic-latest",
                    "network_driver": "flannel"
                }
            },
            "users": users,
            "tenants": tenants
        })
        ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context)
        ct_ctx.setup()
        # the generator must forward every configured template attribute
        # to _create_cluster_template, once per tenant
        ct_ctx_config = self.context["config"]["cluster_templates"]
        image_id = ct_ctx_config.get("image_id")
        external_network_id = ct_ctx_config.get(
            "external_network_id")
        dns_nameserver = ct_ctx_config.get("dns_nameserver")
        flavor_id = ct_ctx_config.get("flavor_id")
        docker_volume_size = ct_ctx_config.get("docker_volume_size")
        network_driver = ct_ctx_config.get("network_driver")
        coe = ct_ctx_config.get("coe")
        mock_calls = [mock.call(image_id=image_id,
                                external_network_id=external_network_id,
                                dns_nameserver=dns_nameserver,
                                flavor_id=flavor_id,
                                docker_volume_size=docker_volume_size,
                                network_driver=network_driver, coe=coe)
                      for i in range(tenants_count)]
        mock__create_cluster_template.assert_has_calls(mock_calls)
        # check that stack ids have been saved in context
        for ten_id in self.context["tenants"].keys():
            self.assertIsNotNone(
                self.context["tenants"][ten_id]["cluster_template"])

    @mock.patch("%s.magnum.cluster_templates.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        self.context.update({
            "users": mock.MagicMock()
        })
        ct_ctx = cluster_templates.ClusterTemplateGenerator(self.context)
        ct_ctx.cleanup()
        # cleanup must be scoped to this context's owner task
        mock_cleanup.assert_called_once_with(
            names=["magnum.cluster_templates"],
            users=self.context["users"],
            superclass=magnum_utils.MagnumScenario,
            task_id=self.context["owner_id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,700
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/cinder/volume_backups.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
"""Scenarios for Cinder Volume Backup."""
@validation.add("number", param_name="size", minval=1, integer_only=True)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
                subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["cinder"]},
    name="CinderVolumeBackups.create_incremental_volume_backup",
    platform="openstack")
class CreateIncrementalVolumeBackup(cinder_utils.CinderBasic):
    def run(self, size, do_delete=True, create_volume_kwargs=None,
            create_backup_kwargs=None):
        """Create an incremental volume backup.

        The scenario first creates a volume, then creates a backup of it.
        That first backup is a full backup, because an incremental backup
        must be based on an existing full backup. Finally an incremental
        backup is created.

        :param size: volume size in GB
        :param do_delete: deletes backup and volume after creating if True
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}
        volume = self.cinder.create_volume(size, **create_volume_kwargs)
        # first backup is implicitly a full one
        backup1 = self.cinder.create_backup(volume.id, **create_backup_kwargs)
        backup2 = self.cinder.create_backup(volume.id, incremental=True)
        if do_delete:
            # the incremental backup must be deleted before its base backup
            self.cinder.delete_backup(backup2)
            self.cinder.delete_backup(backup1)
            self.cinder.delete_volume(volume)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,701
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/loadbalancer/octavia.py
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import service
from rally.task import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class Octavia(service.Service):
    @atomic.action_timer("octavia.load_balancer_list")
    def load_balancer_list(self):
        """List all load balancers.

        :returns: a list of load balancers
        """
        return self._clients.octavia().load_balancer_list()
@atomic.action_timer("octavia.load_balancer_show")
def load_balancer_show(self, lb_id):
"""Show a load balancer
:param string lb:
dict of the load balancer to show
:return:
A dict of the specified load balancer's settings
"""
try:
new_lb = self._clients.octavia().load_balancer_show(lb_id)
except Exception as e:
if getattr(e, "code", 400) == 404:
raise exceptions.GetResourceNotFound(resource=lb_id)
raise exceptions.GetResourceFailure(resource=lb_id, err=e)
return new_lb
@atomic.action_timer("octavia.load_balancer_create")
def load_balancer_create(self, subnet_id, description=None,
admin_state=None, project_id=None,
listeners=None, flavor_id=None,
provider=None, vip_qos_policy_id=None):
"""Create a load balancer
:return:
A dict of the created load balancer's settings
"""
args = {
"name": self.generate_random_name(),
"description": description,
"listeners": listeners,
"provider": provider,
"admin_state_up": admin_state or True,
"project_id": project_id,
"vip_subnet_id": subnet_id,
"vip_qos_policy_id": vip_qos_policy_id,
}
lb = self._clients.octavia().load_balancer_create(
json={"loadbalancer": args})
return lb["loadbalancer"]
    @atomic.action_timer("octavia.load_balancer_delete")
    def load_balancer_delete(self, lb_id, cascade=False):
        """Delete a load balancer.

        :param lb_id: ID of the load balancer to delete
        :param cascade: forwarded to the octavia client; presumably deletes
            the balancer's child objects too -- see octaviaclient docs
        :returns: response code from the API
        """
        return self._clients.octavia().load_balancer_delete(
            lb_id, cascade=cascade)
    @atomic.action_timer("octavia.load_balancer_set")
    def load_balancer_set(self, lb_id, lb_update_args):
        """Update a load balancer's settings.

        :param lb_id: ID of the load balancer to update
        :param lb_update_args: dict of attributes to update; sent as the
            "loadbalancer" request body
        :returns: response code from the API
        """
        return self._clients.octavia().load_balancer_set(
            lb_id, json={"loadbalancer": lb_update_args})
    @atomic.action_timer("octavia.load_balancer_stats_show")
    def load_balancer_stats_show(self, lb_id, **kwargs):
        """Show the current statistics for a load balancer.

        :param lb_id: ID of the load balancer
        :param kwargs: extra arguments forwarded to the octavia client
        :returns: a dict of the specified load balancer's statistics
        """
        return self._clients.octavia().load_balancer_stats_show(
            lb_id, **kwargs)
    @atomic.action_timer("octavia.load_balancer_failover")
    def load_balancer_failover(self, lb_id):
        """Trigger load balancer failover.

        :param lb_id: ID of the load balancer to fail over
        :returns: response code from the API
        """
        return self._clients.octavia().load_balancer_failover(lb_id)
    @atomic.action_timer("octavia.listener_list")
    def listener_list(self, **kwargs):
        """List all listeners.

        :param kwargs: parameters to filter on
        :returns: a list of listeners
        """
        return self._clients.octavia().listener_list(**kwargs)
@atomic.action_timer("octavia.listener_show")
def listener_show(self, listener_id):
    """Show a listener.

    :param string listener_id:
        ID of the listener to show
    :return:
        A dict of the specified listener's settings
    """
    return self._clients.octavia().listener_show(listener_id)
@atomic.action_timer("octavia.listener_create")
def listener_create(self, **kwargs):
    """Create a listener.

    :param kwargs:
        Parameters to create a listener with (expects json=)
    :return:
        A dict of the created listener's settings
    """
    return self._clients.octavia().listener_create(**kwargs)
@atomic.action_timer("octavia.listener_delete")
def listener_delete(self, listener_id):
    """Delete a listener.

    :param string listener_id:
        ID of listener to delete
    :return:
        Response Code from the API
    """
    return self._clients.octavia().listener_delete(listener_id)
@atomic.action_timer("octavia.listener_set")
def listener_set(self, listener_id, **kwargs):
    """Update a listener's settings.

    :param string listener_id:
        ID of the listener to update
    :param kwargs:
        A dict of arguments to update a listener
    :return:
        Response Code from the API
    """
    return self._clients.octavia().listener_set(listener_id, **kwargs)
@atomic.action_timer("octavia.listener_stats_show")
def listener_stats_show(self, listener_id, **kwargs):
    """Shows the current statistics for a listener.

    :param string listener_id:
        ID of the listener
    :param kwargs:
        Extra arguments forwarded to the Octavia client
    :return:
        A dict of the specified listener's statistics
    """
    return self._clients.octavia().listener_stats_show(
        listener_id, **kwargs)
@atomic.action_timer("octavia.pool_list")
def pool_list(self, **kwargs):
    """List all pools.

    :param kwargs:
        Parameters to filter on
    :return:
        List of pools
    """
    return self._clients.octavia().pool_list(**kwargs)
def update_pool_resource(self, pool):
    """Fetch the current state of a pool from the Octavia API.

    Used as an ``update_resource`` callback while waiting for a pool
    to reach a target provisioning status.

    :param dict pool: pool resource; must contain an "id" key
    :return: the freshly fetched pool
    :raises GetResourceNotFound: when the pool no longer exists (404)
    :raises GetResourceFailure: on any other client error
    """
    try:
        return self._clients.octavia().pool_show(pool["id"])
    except Exception as err:
        if getattr(err, "status_code", 400) == 404:
            raise exceptions.GetResourceNotFound(resource=pool)
        raise exceptions.GetResourceFailure(resource=pool, err=err)
@atomic.action_timer("octavia.pool_create")
def pool_create(self, lb_id, protocol, lb_algorithm,
                listener_id=None, description=None,
                admin_state_up=True, project_id=None,
                session_persistence=None):
    """Create a pool and wait until it becomes ACTIVE.

    :param lb_id: ID of the loadbalancer
    :param protocol: protocol of the resource
    :param lb_algorithm: loadbalancing algorithm of the pool
    :param listener_id: ID of the listener
    :param description: a human readable description of the pool
    :param admin_state_up: administrative state of the resource
    :param project_id: project ID of the resource
    :param session_persistence: a json object specifying the session
        persistence of the pool
    :return:
        A dict of the created pool's settings
    """
    args = {
        "name": self.generate_random_name(),
        "loadbalancer_id": lb_id,
        "protocol": protocol,
        "lb_algorithm": lb_algorithm,
        "listener_id": listener_id,
        "description": description,
        "admin_state_up": admin_state_up,
        "project_id": project_id,
        "session_persistence": session_persistence
    }
    pool = self._clients.octavia().pool_create(
        json={"pool": args})
    pool = pool["pool"]
    # NOTE(review): pool creation reuses the *loadbalancer* create
    # timeout/poll-interval options; there is no pool-specific setting.
    pool = utils.wait_for_status(
        pool,
        ready_statuses=["ACTIVE"],
        status_attr="provisioning_status",
        update_resource=self.update_pool_resource,
        timeout=CONF.openstack.octavia_create_loadbalancer_timeout,
        check_interval=(
            CONF.openstack.octavia_create_loadbalancer_poll_interval)
    )
    return pool
@atomic.action_timer("octavia.pool_delete")
def pool_delete(self, pool_id):
    """Delete a pool.

    :param string pool_id:
        ID of pool to delete
    :return:
        Response Code from the API
    """
    return self._clients.octavia().pool_delete(pool_id)
@atomic.action_timer("octavia.pool_show")
def pool_show(self, pool_id):
    """Show a pool's settings.

    :param string pool_id:
        ID of the pool to show
    :return:
        Dict of the specified pool's settings
    """
    return self._clients.octavia().pool_show(pool_id)
@atomic.action_timer("octavia.pool_set")
def pool_set(self, pool_id, pool_update_args):
    """Update a pool's settings.

    :param string pool_id:
        ID of the pool to update
    :param dict pool_update_args:
        A dict of arguments to update a pool
    :return:
        Response Code from the API
    """
    return self._clients.octavia().pool_set(
        pool_id, json={"pool": pool_update_args})
@atomic.action_timer("octavia.member_list")
def member_list(self, pool_id, **kwargs):
    """Lists the members of a given pool.

    :param string pool_id:
        ID of the pool
    :param kwargs:
        A dict of filter arguments
    :return:
        Response list members
    """
    return self._clients.octavia().member_list(pool_id, **kwargs)
@atomic.action_timer("octavia.member_show")
def member_show(self, pool_id, member_id):
    """Show the details of a single member of a pool.

    :param string pool_id:
        ID of the pool the member belongs to
    :param string member_id:
        ID of the member
    :return:
        Response of member
    """
    return self._clients.octavia().member_show(pool_id, member_id)
@atomic.action_timer("octavia.member_create")
def member_create(self, pool_id, **kwargs):
    """Create a member in the given pool.

    :param string pool_id:
        ID of pool to which member is added
    :param kwargs:
        A dict of arguments (expects json=)
    :return:
        A member details on successful creation
    """
    return self._clients.octavia().member_create(pool_id, **kwargs)
@atomic.action_timer("octavia.member_delete")
def member_delete(self, pool_id, member_id):
    """Remove a member from a pool and mark that member as deleted.

    :param string pool_id:
        ID of the pool
    :param string member_id:
        ID of the member to be deleted
    :return:
        Response code from the API
    """
    return self._clients.octavia().member_delete(pool_id, member_id)
@atomic.action_timer("octavia.member_set")
def member_set(self, pool_id, member_id, **kwargs):
    """Update a member's settings.

    :param string pool_id:
        ID of the pool
    :param string member_id:
        ID of the member to be updated
    :param kwargs:
        A dict of the values of member to be updated
    :return:
        Response code from the API
    """
    return self._clients.octavia().member_set(pool_id, member_id, **kwargs)
@atomic.action_timer("octavia.l7policy_list")
def l7policy_list(self, **kwargs):
    """List all l7policies.

    :param kwargs:
        Parameters to filter on
    :return:
        List of l7policies
    """
    return self._clients.octavia().l7policy_list(**kwargs)
@atomic.action_timer("octavia.l7policy_create")
def l7policy_create(self, **kwargs):
    """Create a l7policy.

    :param kwargs:
        Parameters to create a l7policy with (expects json=)
    :return:
        A dict of the created l7policy's settings
    """
    return self._clients.octavia().l7policy_create(**kwargs)
@atomic.action_timer("octavia.l7policy_delete")
def l7policy_delete(self, l7policy_id):
    """Delete a l7policy.

    :param string l7policy_id:
        ID of l7policy to delete
    :return:
        Response Code from the API
    """
    return self._clients.octavia().l7policy_delete(l7policy_id)
@atomic.action_timer("octavia.l7policy_show")
def l7policy_show(self, l7policy_id):
    """Show a l7policy's settings.

    :param string l7policy_id:
        ID of the l7policy to show
    :return:
        Dict of the specified l7policy's settings
    """
    return self._clients.octavia().l7policy_show(l7policy_id)
@atomic.action_timer("octavia.l7policy_set")
def l7policy_set(self, l7policy_id, **kwargs):
    """Update a l7policy's settings.

    :param string l7policy_id:
        ID of the l7policy to update
    :param kwargs:
        A dict of arguments to update a l7policy
    :return:
        Response Code from the API
    """
    return self._clients.octavia().l7policy_set(l7policy_id, **kwargs)
@atomic.action_timer("octavia.l7rule_list")
def l7rule_list(self, l7policy_id, **kwargs):
    """List all l7rules for a l7policy.

    :param string l7policy_id:
        ID of the l7policy whose rules are listed
    :param kwargs:
        Parameters to filter on
    :return:
        List of l7rules
    """
    return self._clients.octavia().l7rule_list(l7policy_id, **kwargs)
@atomic.action_timer("octavia.l7rule_create")
def l7rule_create(self, l7policy_id, **kwargs):
    """Create a l7rule.

    :param string l7policy_id:
        The l7policy to create the l7rule for
    :param kwargs:
        Parameters to create a l7rule with (expects json=)
    :return:
        A dict of the created l7rule's settings
    """
    return self._clients.octavia().l7rule_create(l7policy_id, **kwargs)
@atomic.action_timer("octavia.l7rule_delete")
def l7rule_delete(self, l7rule_id, l7policy_id):
    """Delete a l7rule.

    :param string l7rule_id:
        ID of the l7rule to delete
    :param string l7policy_id:
        ID of the l7policy for this l7rule
    :return:
        Response Code from the API
    """
    return self._clients.octavia().l7rule_delete(l7rule_id, l7policy_id)
@atomic.action_timer("octavia.l7rule_show")
def l7rule_show(self, l7rule_id, l7policy_id):
    """Show a l7rule's settings.

    :param string l7rule_id:
        ID of the l7rule to show
    :param string l7policy_id:
        ID of the l7policy for this l7rule
    :return:
        Dict of the specified l7rule's settings
    """
    return self._clients.octavia().l7rule_show(l7rule_id, l7policy_id)
@atomic.action_timer("octavia.l7rule_set")
def l7rule_set(self, l7rule_id, l7policy_id, **kwargs):
    """Update a l7rule's settings.

    :param string l7rule_id:
        ID of the l7rule to update
    :param string l7policy_id:
        ID of the l7policy for this l7rule
    :param kwargs:
        A dict of arguments to update a l7rule
    :return:
        Response Code from the API
    """
    return self._clients.octavia().l7rule_set(l7rule_id, l7policy_id,
                                              **kwargs)
@atomic.action_timer("octavia.health_monitor_list")
def health_monitor_list(self, **kwargs):
    """List all health monitors.

    :param kwargs:
        Parameters to filter on
    :return:
        A dict containing a list of health monitors
    """
    return self._clients.octavia().health_monitor_list(**kwargs)
@atomic.action_timer("octavia.health_monitor_create")
def health_monitor_create(self, **kwargs):
    """Create a health monitor.

    :param kwargs:
        Parameters to create a health monitor with (expects json=)
    :return:
        A dict of the created health monitor's settings
    """
    return self._clients.octavia().health_monitor_create(**kwargs)
@atomic.action_timer("octavia.health_monitor_delete")
def health_monitor_delete(self, health_monitor_id):
    """Delete a health_monitor.

    :param string health_monitor_id:
        ID of health monitor to delete
    :return:
        Response Code from the API
    """
    return self._clients.octavia().health_monitor_delete(health_monitor_id)
@atomic.action_timer("octavia.health_monitor_show")
def health_monitor_show(self, health_monitor_id):
    """Show a health monitor's settings.

    :param string health_monitor_id:
        ID of the health monitor to show
    :return:
        Dict of the specified health monitor's settings
    """
    return self._clients.octavia().health_monitor_show(health_monitor_id)
@atomic.action_timer("octavia.health_monitor_set")
def health_monitor_set(self, health_monitor_id, **kwargs):
    """Update a health monitor's settings.

    :param string health_monitor_id:
        ID of the health monitor to update
    :param kwargs:
        A dict of arguments to update a health monitor
    :return:
        Response Code from the API
    """
    return self._clients.octavia().health_monitor_set(health_monitor_id,
                                                      **kwargs)
@atomic.action_timer("octavia.quota_list")
def quota_list(self, params):
    """List all quotas.

    :param params:
        Parameters to filter on (not implemented)
    :return:
        A ``dict`` representing a list of quotas for the project
    """
    return self._clients.octavia().quota_list(params)
@atomic.action_timer("octavia.quota_show")
def quota_show(self, project_id):
    """Show a quota.

    :param string project_id:
        ID of the project to show
    :return:
        A ``dict`` representing the quota for the project
    """
    return self._clients.octavia().quota_show(project_id)
@atomic.action_timer("octavia.quota_reset")
def quota_reset(self, project_id):
    """Reset a quota.

    :param string project_id:
        The ID of the project to reset quotas
    :return:
        ``None``
    """
    return self._clients.octavia().quota_reset(project_id)
@atomic.action_timer("octavia.quota_set")
def quota_set(self, project_id, params):
    """Update a quota's settings.

    :param string project_id:
        The ID of the project to update
    :param params:
        A ``dict`` of arguments to update project quota
    :return:
        A ``dict`` representing the updated quota
    """
    return self._clients.octavia().quota_set(project_id, params)
@atomic.action_timer("octavia.quota_defaults_show")
def quota_defaults_show(self):
    """Show quota defaults.

    :return:
        A ``dict`` representing a list of quota defaults
    """
    return self._clients.octavia().quota_defaults_show()
@atomic.action_timer("octavia.amphora_show")
def amphora_show(self, amphora_id):
    """Show an amphora.

    :param string amphora_id:
        ID of the amphora to show
    :return:
        A ``dict`` of the specified amphora's attributes
    """
    return self._clients.octavia().amphora_show(amphora_id)
@atomic.action_timer("octavia.amphora_list")
def amphora_list(self, **kwargs):
    """List all amphorae.

    :param kwargs:
        Parameters to filter on
    :return:
        A ``dict`` containing a list of amphorae
    """
    return self._clients.octavia().amphora_list(**kwargs)
@atomic.action_timer("octavia.wait_for_loadbalancers")
def wait_for_loadbalancer_prov_status(self, lb, prov_status="ACTIVE"):
    """Poll a load balancer until it reaches the given provisioning status.

    :param dict lb: load balancer resource; must contain an "id" key
    :param string prov_status: provisioning status to wait for
    :return: the load balancer resource in the requested status
    """
    # Refresh callback used by wait_for_status between polls.
    def _refresh(resource):
        return self.load_balancer_show(resource["id"])

    return utils.wait_for_status(
        lb,
        ready_statuses=[prov_status],
        status_attr="provisioning_status",
        update_resource=_refresh,
        timeout=CONF.openstack.octavia_create_loadbalancer_timeout,
        check_interval=(
            CONF.openstack.octavia_create_loadbalancer_poll_interval)
    )
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,702
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/magnum/test_utils.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import os
from kubernetes import client as kubernetes_client
from kubernetes.client.rest import ApiException
from rally import exceptions
from rally_openstack.task.scenarios.magnum import utils
from tests.unit import test
MAGNUM_UTILS = "rally_openstack.task.scenarios.magnum.utils"
CONF = utils.CONF
class MagnumScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the Magnum scenario utility methods.

    Each test wires the mocked ``magnum`` client (provided by the base
    ``ScenarioTestCase``) and verifies both the return value and the
    atomic-action timer recorded by the scenario helper under test.
    """

    def setUp(self):
        super(MagnumScenarioTestCase, self).setUp()
        # Shared fake resources returned by the mocked magnum client.
        self.cluster_template = mock.Mock()
        self.cluster = mock.Mock()
        self.pod = mock.Mock()
        self.scenario = utils.MagnumScenario(self.context)

    def test_list_cluster_templates(self):
        fake_list = [self.cluster_template]
        self.clients("magnum").cluster_templates.list.return_value = fake_list
        return_ct_list = self.scenario._list_cluster_templates()
        self.assertEqual(fake_list, return_ct_list)
        self.clients("magnum").cluster_templates.list.assert_called_once_with()
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "magnum.list_cluster_templates")

    def test_create_cluster_template(self):
        self.scenario.generate_random_name = mock.Mock(
            return_value="generated_name")
        fake_ct = self.cluster_template
        self.clients("magnum").cluster_templates.create.return_value = fake_ct
        return_cluster_template = self.scenario._create_cluster_template(
            image="test_image",
            keypair="test_key",
            external_network="public",
            dns_nameserver="8.8.8.8",
            flavor="m1.large",
            docker_volume_size=50,
            network_driver="docker",
            coe="swarm")
        self.assertEqual(fake_ct, return_cluster_template)
        # The scenario must inject its generated name into the create call.
        _, kwargs = self.clients("magnum").cluster_templates.create.call_args
        self.assertEqual("generated_name", kwargs["name"])
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "magnum.create_cluster_template")

    def test_get_cluster_template(self):
        client = self.clients("magnum")
        client.cluster_templates.get.return_value = self.cluster_template
        return_cluster_template = self.scenario._get_cluster_template("uuid")
        client.cluster_templates.get.assert_called_once_with("uuid")
        self.assertEqual(self.cluster_template, return_cluster_template)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.get_cluster_template")

    def test_list_clusters(self):
        return_clusters_list = self.scenario._list_clusters(limit="foo1")
        client = self.clients("magnum")
        client.clusters.list.assert_called_once_with(limit="foo1")
        self.assertEqual(client.clusters.list.return_value,
                         return_clusters_list)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.list_clusters")

    def test_create_cluster(self):
        self.scenario.generate_random_name = mock.Mock(
            return_value="generated_name")
        self.clients("magnum").clusters.create.return_value = self.cluster
        return_cluster = self.scenario._create_cluster(
            cluster_template="generated_uuid", node_count=2)
        # Cluster creation must block until CREATE_COMPLETE is reached.
        self.mock_wait_for_status.mock.assert_called_once_with(
            self.cluster,
            ready_statuses=["CREATE_COMPLETE"],
            failure_statuses=["CREATE_FAILED", "ERROR"],
            update_resource=self.mock_get_from_manager.mock.return_value,
            check_interval=CONF.openstack.
            magnum_cluster_create_poll_interval,
            timeout=CONF.openstack.magnum_cluster_create_timeout,
            id_attr="uuid")
        _, kwargs = self.clients("magnum").clusters.create.call_args
        self.assertEqual("generated_name", kwargs["name"])
        self.assertEqual("generated_uuid", kwargs["cluster_template_id"])
        self.mock_get_from_manager.mock.assert_called_once_with()
        self.assertEqual(
            self.mock_wait_for_status.mock.return_value, return_cluster)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.create_cluster")

    def test_get_cluster(self):
        self.clients("magnum").clusters.get.return_value = self.cluster
        return_cluster = self.scenario._get_cluster("uuid")
        self.clients("magnum").clusters.get.assert_called_once_with("uuid")
        self.assertEqual(self.cluster, return_cluster)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.get_cluster")

    def test_get_ca_certificate(self):
        self.scenario._get_ca_certificate(self.cluster.uuid)
        self.clients("magnum").certificates.get.assert_called_once_with(
            self.cluster.uuid)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.get_ca_certificate")

    def test_create_ca_certificate(self):
        csr_req = {"cluster_uuid": "uuid", "csr": "csr file"}
        self.scenario._create_ca_certificate(csr_req)
        self.clients("magnum").certificates.create.assert_called_once_with(
            **csr_req)
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.create_ca_certificate")

    @mock.patch("kubernetes.client.api_client.ApiClient")
    @mock.patch("kubernetes.client.api.core_v1_api.CoreV1Api")
    def test_get_k8s_api_client_using_tls(self, mock_core_v1_api,
                                          mock_api_client):
        # The Configuration class was renamed between k8s-client releases;
        # patch whichever name this installation provides.
        if hasattr(kubernetes_client, "ConfigurationObject"):
            # it is k8s-client < 4.0.0
            m = mock.patch("kubernetes.client.ConfigurationObject")
        else:
            m = mock.patch("kubernetes.client.Configuration")
        mock_configuration_object = m.start()
        self.addCleanup(m.stop)
        self.context.update({
            "ca_certs_directory": "/home/stack",
            "tenant": {
                "id": "rally_tenant_id",
                "cluster": "rally_cluster_uuid"
            }
        })
        self.scenario = utils.MagnumScenario(self.context)
        cluster_uuid = self.context["tenant"]["cluster"]
        client = self.clients("magnum")
        client.clusters.get.return_value = self.cluster
        cluster = self.scenario._get_cluster(cluster_uuid)
        self.cluster_template.tls_disabled = False
        client.cluster_templates.get.return_value = self.cluster_template
        # NOTE(review): "dir" shadows the builtin; kept for fidelity with
        # the code under review.
        dir = self.context["ca_certs_directory"]
        key_file = os.path.join(dir, cluster_uuid.__add__(".key"))
        cert_file = os.path.join(dir, cluster_uuid.__add__(".crt"))
        ca_certs = os.path.join(dir, cluster_uuid.__add__("_ca.crt"))
        config = mock_configuration_object.return_value
        config.host = cluster.api_address
        config.ssl_ca_cert = ca_certs
        config.cert_file = cert_file
        config.key_file = key_file
        _api_client = mock_api_client.return_value
        self.scenario._get_k8s_api_client()
        mock_configuration_object.assert_called_once_with()
        if hasattr(kubernetes_client, "ConfigurationObject"):
            # k8s-python < 4.0.0
            mock_api_client.assert_called_once_with(config=config)
        else:
            mock_api_client.assert_called_once_with(config)
        mock_core_v1_api.assert_called_once_with(_api_client)

    @mock.patch("kubernetes.client.api_client.ApiClient")
    @mock.patch("kubernetes.client.api.core_v1_api.CoreV1Api")
    def test_get_k8s_api_client(self, mock_core_v1_api, mock_api_client):
        if hasattr(kubernetes_client, "ConfigurationObject"):
            # it is k8s-client < 4.0.0
            m = mock.patch("kubernetes.client.ConfigurationObject")
        else:
            m = mock.patch("kubernetes.client.Configuration")
        mock_configuration_object = m.start()
        self.addCleanup(m.stop)
        self.context.update({
            "tenant": {
                "id": "rally_tenant_id",
                "cluster": "rally_cluster_uuid"
            }
        })
        self.scenario = utils.MagnumScenario(self.context)
        cluster_uuid = self.context["tenant"]["cluster"]
        client = self.clients("magnum")
        client.clusters.get.return_value = self.cluster
        cluster = self.scenario._get_cluster(cluster_uuid)
        # TLS disabled: no certificate files should be configured.
        self.cluster_template.tls_disabled = True
        client.cluster_templates.get.return_value = self.cluster_template
        config = mock_configuration_object.return_value
        config.host = cluster.api_address
        config.ssl_ca_cert = None
        config.cert_file = None
        config.key_file = None
        _api_client = mock_api_client.return_value
        self.scenario._get_k8s_api_client()
        mock_configuration_object.assert_called_once_with()
        if hasattr(kubernetes_client, "ConfigurationObject"):
            # k8s-python < 4.0.0
            mock_api_client.assert_called_once_with(config=config)
        else:
            mock_api_client.assert_called_once_with(config)
        mock_core_v1_api.assert_called_once_with(_api_client)

    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_list_v1pods(self, mock__get_k8s_api_client):
        k8s_api = mock__get_k8s_api_client.return_value
        self.scenario._list_v1pods()
        k8s_api.list_node.assert_called_once_with(
            namespace="default")
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.k8s_list_v1pods")

    @mock.patch("random.choice")
    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_create_v1pod(self, mock__get_k8s_api_client,
                          mock_random_choice):
        k8s_api = mock__get_k8s_api_client.return_value
        manifest = (
            {"apiVersion": "v1", "kind": "Pod",
             "metadata": {"name": "nginx"}})
        # Reproduce the random 5-character suffix the scenario appends.
        podname = manifest["metadata"]["name"] + "-"
        for i in range(5):
            podname = podname + mock_random_choice.return_value
        # First create attempt fails with 403 to exercise the retry path.
        k8s_api.create_namespaced_pod = mock.MagicMock(
            side_effect=[ApiException(status=403), self.pod])
        not_ready_pod = kubernetes_client.models.V1Pod()
        not_ready_status = kubernetes_client.models.V1PodStatus()
        not_ready_status.phase = "not_ready"
        not_ready_pod.status = not_ready_status
        almost_ready_pod = kubernetes_client.models.V1Pod()
        almost_ready_status = kubernetes_client.models.V1PodStatus()
        almost_ready_status.phase = "almost_ready"
        almost_ready_pod.status = almost_ready_status
        ready_pod = kubernetes_client.models.V1Pod()
        ready_condition = kubernetes_client.models.V1PodCondition(
            status="True", type="Ready")
        ready_status = kubernetes_client.models.V1PodStatus()
        ready_status.phase = "Running"
        ready_status.conditions = [ready_condition]
        ready_pod_metadata = kubernetes_client.models.V1ObjectMeta()
        ready_pod_metadata.uid = "123456789"
        ready_pod_spec = kubernetes_client.models.V1PodSpec(
            node_name="host_abc",
            containers=[]
        )
        ready_pod.status = ready_status
        ready_pod.metadata = ready_pod_metadata
        ready_pod.spec = ready_pod_spec
        # Status polling: not ready twice, then Running/Ready.
        k8s_api.read_namespaced_pod = mock.MagicMock(
            side_effect=[not_ready_pod, almost_ready_pod, ready_pod])
        self.scenario._create_v1pod(manifest)
        k8s_api.create_namespaced_pod.assert_called_with(
            body=manifest, namespace="default")
        k8s_api.read_namespaced_pod.assert_called_with(
            name=podname, namespace="default")
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.k8s_create_v1pod")

    @mock.patch("time.time")
    @mock.patch("random.choice")
    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_create_v1pod_timeout(self, mock__get_k8s_api_client,
                                  mock_random_choice, mock_time):
        k8s_api = mock__get_k8s_api_client.return_value
        manifest = (
            {"apiVersion": "v1", "kind": "Pod",
             "metadata": {"name": "nginx"}})
        k8s_api.create_namespaced_pod.return_value = self.pod
        # Fake clock jumps past the scenario's wait deadline.
        mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801]
        not_ready_pod = kubernetes_client.models.V1Pod()
        not_ready_status = kubernetes_client.models.V1PodStatus()
        not_ready_status.phase = "not_ready"
        not_ready_pod_metadata = kubernetes_client.models.V1ObjectMeta()
        not_ready_pod_metadata.uid = "123456789"
        not_ready_pod.status = not_ready_status
        not_ready_pod.metadata = not_ready_pod_metadata
        k8s_api.read_namespaced_pod = mock.MagicMock(
            side_effect=[not_ready_pod
                         for i in range(4)])
        self.assertRaises(
            exceptions.TimeoutException,
            self.scenario._create_v1pod, manifest)

    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_list_v1rcs(self, mock__get_k8s_api_client):
        k8s_api = mock__get_k8s_api_client.return_value
        self.scenario._list_v1rcs()
        (k8s_api.list_namespaced_replication_controller
         .assert_called_once_with(namespace="default"))
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.k8s_list_v1rcs")

    @mock.patch("random.choice")
    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_create_v1rc(self, mock__get_k8s_api_client,
                         mock_random_choice):
        k8s_api = mock__get_k8s_api_client.return_value
        manifest = (
            {"apiVersion": "v1",
             "kind": "ReplicationController",
             "metadata": {"name": "nginx-controller"},
             "spec": {"replicas": 2,
                      "selector": {"name": "nginx"},
                      "template": {"metadata":
                                   {"labels":
                                    {"name": "nginx"}}}}})
        # Reproduce the random 5-character suffix the scenario appends.
        suffix = "-"
        for i in range(5):
            suffix = suffix + mock_random_choice.return_value
        rcname = manifest["metadata"]["name"] + suffix
        rc = kubernetes_client.models.V1ReplicationController()
        rc.spec = kubernetes_client.models.V1ReplicationControllerSpec()
        rc.spec.replicas = manifest["spec"]["replicas"]
        k8s_api.create_namespaced_replication_controller.return_value = rc
        not_ready_rc = kubernetes_client.models.V1ReplicationController()
        not_ready_rc_status = (
            kubernetes_client.models.V1ReplicationControllerStatus(replicas=0))
        not_ready_rc.status = not_ready_rc_status
        ready_rc = kubernetes_client.models.V1ReplicationController()
        ready_rc_status = (
            kubernetes_client.models.V1ReplicationControllerStatus(
                replicas=manifest["spec"]["replicas"])
        )
        ready_rc_metadata = kubernetes_client.models.V1ObjectMeta()
        ready_rc_metadata.uid = "123456789"
        ready_rc_metadata.name = rcname
        ready_rc.status = ready_rc_status
        ready_rc.metadata = ready_rc_metadata
        # Status polling: zero replicas first, then the requested count.
        k8s_api.read_namespaced_replication_controller = mock.MagicMock(
            side_effect=[not_ready_rc, ready_rc])
        self.scenario._create_v1rc(manifest)
        (k8s_api.create_namespaced_replication_controller
         .assert_called_once_with(body=manifest, namespace="default"))
        (k8s_api.read_namespaced_replication_controller
         .assert_called_with(name=rcname, namespace="default"))
        self._test_atomic_action_timer(
            self.scenario.atomic_actions(), "magnum.k8s_create_v1rc")

    @mock.patch("time.time")
    @mock.patch("random.choice")
    @mock.patch(MAGNUM_UTILS + ".MagnumScenario._get_k8s_api_client")
    def test_create_v1rc_timeout(self, mock__get_k8s_api_client,
                                 mock_random_choice, mock_time):
        k8s_api = mock__get_k8s_api_client.return_value
        manifest = (
            {"apiVersion": "v1",
             "kind": "ReplicationController",
             "metadata": {"name": "nginx-controller"},
             "spec": {"replicas": 2,
                      "selector": {"app": "nginx"},
                      "template": {"metadata":
                                   {"labels":
                                    {"name": "nginx"}}}}})
        rc = kubernetes_client.models.V1ReplicationController()
        rc.spec = kubernetes_client.models.V1ReplicationControllerSpec()
        rc.spec.replicas = manifest["spec"]["replicas"]
        # Fake clock jumps past the scenario's wait deadline.
        mock_time.side_effect = [1, 2, 3, 4, 5, 1800, 1801]
        k8s_api.create_namespaced_replication_controller.return_value = rc
        not_ready_rc = kubernetes_client.models.V1ReplicationController()
        not_ready_rc_status = (
            kubernetes_client.models.V1ReplicationControllerStatus(replicas=0))
        not_ready_rc_metadata = kubernetes_client.models.V1ObjectMeta()
        not_ready_rc_metadata.uid = "123456789"
        not_ready_rc.status = not_ready_rc_status
        not_ready_rc.metadata = not_ready_rc_metadata
        k8s_api.read_namespaced_replication_controller = mock.MagicMock(
            side_effect=[not_ready_rc
                         for i in range(4)])
        self.assertRaises(
            exceptions.TimeoutException,
            self.scenario._create_v1rc, manifest)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,703
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/grafana/grafana.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from rally.common import logging
from rally.common import utils as commonutils
from rally.task import atomic
from rally.task import service
LOG = logging.getLogger(__name__)
class GrafanaService(service.Service):
    """Thin client for a Grafana + Prometheus Pushgateway monitoring stack."""

    def __init__(self, spec, name_generator=None, atomic_inst=None):
        """Initialization of Grafana service.

        :param spec: param contains monitoring system info: IPs, ports, creds
        """
        super(GrafanaService, self).__init__(None,
                                             name_generator=name_generator,
                                             atomic_inst=atomic_inst)
        self._spec = spec

    @atomic.action_timer("grafana.check_metric")
    def check_metric(self, seed, sleep_time, retries_total):
        """Check metric with seed name in Grafana datasource.

        :param seed: random metric name
        :param sleep_time: sleep time between checking metrics in seconds
        :param retries_total: total number of retries to check metric in
            Grafana
        :return: True if metric in Grafana datasource and False otherwise
        """
        check_url = ("http://%(vip)s:%(port)s/api/datasources/proxy/:"
                     "%(datasource)s/api/v1/query?query=%(seed)s" % {
                         "vip": self._spec["monitor_vip"],
                         "port": self._spec["grafana"]["port"],
                         "datasource": self._spec["datasource_id"],
                         "seed": seed
                     })
        i = 0
        # Lazy %-args: the message is only rendered if the level is enabled.
        LOG.info("Check metric %s in Grafana", seed)
        while i < retries_total:
            LOG.debug("Attempt number %s", i + 1)
            resp = requests.get(check_url,
                                auth=(self._spec["grafana"]["user"],
                                      self._spec["grafana"]["password"]))
            result = resp.json()
            LOG.debug("Grafana response code: %s", resp.status_code)
            no_result = (result.get("data") is None
                         or len(result["data"]["result"]) < 1)
            if no_result and i + 1 >= retries_total:
                LOG.debug("No instance metrics found in Grafana")
                return False
            elif no_result:
                i += 1
                commonutils.interruptable_sleep(sleep_time)
            else:
                LOG.debug("Metric instance found in Grafana")
                return True
        # retries_total <= 0: no attempt was made. Previously this fell
        # through and returned None implicitly; report failure explicitly
        # as the docstring promises a boolean.
        return False

    @atomic.action_timer("grafana.push_metric")
    def push_metric(self, seed):
        """Push metric by GET request using pushgateway.

        :param seed: random name for metric to push
        :return: True if the push request succeeded, False otherwise
        """
        push_url = "http://%(ip)s:%(port)s/metrics/job/%(job)s" % {
            "ip": self._spec["monitor_vip"],
            "port": self._spec["pushgateway_port"],
            "job": self._spec["job_name"]
        }
        # NOTE(review): Pushgateway normally expects text/plain exposition
        # format; "text/xml" is kept as-is to preserve behavior — confirm
        # against the deployed pushgateway version.
        resp = requests.post(push_url,
                             headers={"Content-type": "text/xml"},
                             data="%s 12345\n" % seed)
        if resp.ok:
            LOG.info("Metric %s pushed", seed)
        else:
            LOG.error("Error during push metric %s", seed)
        return resp.ok
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,704
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/network/test_networking_agents.py
|
# Copyright 2019 Ericsson Software Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.network import networking_agents
from tests.unit import test
CTX = "rally_openstack.task.contexts.network"
class NetworkingAgentsTestCase(test.TestCase):
    """Checks for the ``networking_agents`` context plugin."""

    def setUp(self):
        super(NetworkingAgentsTestCase, self).setUp()
        self.config = {}
        self.context = test.get_test_context()
        extra = {
            "users": [
                {"id": 1,
                 "tenant_id": "tenant1",
                 "credential": mock.Mock()},
            ],
            "admin": {"credential": mock.Mock()},
            "config": {"networking_agents": self.config},
        }
        self.context.update(extra)

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup(self, mock_clients):
        # setup() must ask neutron for the list of agents
        ctx = networking_agents.NetworkingAgents(self.context)
        ctx.setup()
        expected_calls = [mock.call().neutron().list_agents()]
        mock_clients.assert_has_calls(expected_calls)

    def test_cleanup(self):
        # NOTE(stpierre): Test that cleanup is not abstract
        plugin = networking_agents.NetworkingAgents(
            {"task": mock.MagicMock()})
        plugin.cleanup()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,705
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/heat/test_stacks.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.heat import stacks
from rally_openstack.task.scenarios.heat import utils as heat_utils
from tests.unit import fakes
from tests.unit import test
CTX = "rally_openstack.task.contexts"
SCN = "rally_openstack.task.scenarios"
class TestStackGenerator(test.ScenarioTestCase):
    """Checks for the heat ``stacks`` context plugin."""

    def _gen_tenants(self, count):
        # {tenant_id: {"name": tenant_id}} for ids "0" .. str(count - 1)
        return {str(idx): {"name": str(idx)} for idx in range(count)}

    def test_init(self):
        stacks_cfg = {
            "stacks_per_tenant": 1,
            "resources_per_stack": 1
        }
        self.context.update({"config": {"stacks": stacks_cfg}})
        inst = stacks.StackGenerator(self.context)
        self.assertEqual(inst.config, self.context["config"]["stacks"])

    @mock.patch("%s.heat.utils.HeatScenario._create_stack" % SCN,
                return_value=fakes.FakeStack(id="uuid"))
    def test_setup(self, mock_heat_scenario__create_stack):
        tenants_count = 2
        users_per_tenant = 5
        stacks_per_tenant = 1
        tenants = self._gen_tenants(tenants_count)
        users = [
            {"id": uid, "tenant_id": ten_id, "credential": mock.MagicMock()}
            for ten_id in tenants
            for uid in range(users_per_tenant)
        ]
        self.context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "stacks": {
                    "stacks_per_tenant": stacks_per_tenant,
                    "resources_per_stack": 1
                }
            },
            "users": users,
            "tenants": tenants
        })
        stack_ctx = stacks.StackGenerator(self.context)
        stack_ctx.setup()
        self.assertEqual(tenants_count * stacks_per_tenant,
                         mock_heat_scenario__create_stack.call_count)
        # check that stack ids have been saved in context
        for ten_id in self.context["tenants"]:
            self.assertEqual(
                stacks_per_tenant,
                len(self.context["tenants"][ten_id]["stacks"]))

    @mock.patch("%s.heat.stacks.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        self.context.update({"users": mock.MagicMock()})
        stacks.StackGenerator(self.context).cleanup()
        mock_cleanup.assert_called_once_with(
            names=["heat.stacks"],
            users=self.context["users"],
            superclass=heat_utils.HeatScenario,
            task_id=self.context["owner_id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,706
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/cleanup/test_manager.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally.common import utils
from rally_openstack.task.cleanup import base
from rally_openstack.task.cleanup import manager
from tests.unit import test
BASE = "rally_openstack.task.cleanup.manager"
class SeekAndDestroyTestCase(test.TestCase):
    """Unit tests for ``manager.SeekAndDestroy``."""

    def setUp(self):
        super(SeekAndDestroyTestCase, self).setUp()
        # clear out the client cache
        manager.SeekAndDestroy.cache = {}

    def test__get_cached_client(self):
        destroyer = manager.SeekAndDestroy(None, None, None)
        cred = mock.Mock()
        user = {"credential": cred}
        clients = destroyer._get_cached_client(user)
        self.assertIs(cred.clients.return_value, clients)
        cred.clients.assert_called_once_with()
        # a None user must produce no client
        self.assertIsNone(destroyer._get_cached_client(None))

    @mock.patch("%s.LOG" % BASE)
    def test__delete_single_resource(self, mock_log):
        mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
                                       _interval=0.01)
        # delete fails twice, succeeds on the third attempt
        mock_resource.delete.side_effect = [Exception, Exception, True]
        mock_resource.is_deleted.side_effect = [False, False, True]
        manager.SeekAndDestroy(None, None, None)._delete_single_resource(
            mock_resource)
        mock_resource.delete.assert_has_calls([mock.call()] * 3)
        self.assertEqual(3, mock_resource.delete.call_count)
        mock_resource.is_deleted.assert_has_calls([mock.call()] * 3)
        self.assertEqual(3, mock_resource.is_deleted.call_count)
        # NOTE(boris-42): No logs and no exceptions means no bugs!
        self.assertEqual(0, mock_log.call_count)

    @mock.patch("%s.LOG" % BASE)
    def test__delete_single_resource_timeout(self, mock_log):
        # _interval > _timeout guarantees the deadline hits after one check
        mock_resource = mock.MagicMock(_max_attempts=1, _timeout=0.02,
                                       _interval=0.025)
        mock_resource.delete.return_value = True
        mock_resource.is_deleted.side_effect = [False, False, True]
        manager.SeekAndDestroy(None, None, None)._delete_single_resource(
            mock_resource)
        mock_resource.delete.assert_called_once_with()
        mock_resource.is_deleted.assert_called_once_with()
        self.assertEqual(1, mock_log.warning.call_count)

    @mock.patch("%s.LOG" % BASE)
    def test__delete_single_resource_exception_in_is_deleted(self, mock_log):
        mock_resource = mock.MagicMock(_max_attempts=3, _timeout=10,
                                       _interval=0)
        mock_resource.delete.return_value = True
        # every is_deleted() check blows up; each one must be logged
        mock_resource.is_deleted.side_effect = [Exception] * 4
        manager.SeekAndDestroy(None, None, None)._delete_single_resource(
            mock_resource)
        mock_resource.delete.assert_called_once_with()
        self.assertEqual(4, mock_resource.is_deleted.call_count)
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertEqual(4, mock_log.exception.call_count)

    def _manager(self, list_side_effect, **kw):
        # Build a mock resource-manager class whose instances' list()
        # yields the given side effects; extra kwargs become class attrs.
        mock_mgr = mock.MagicMock()
        mock_mgr().list.side_effect = list_side_effect
        mock_mgr.reset_mock()
        for k, v in kw.items():
            setattr(mock_mgr, k, v)
        return mock_mgr

    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    def test__publisher_admin(self, mock__get_cached_client):
        mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
                                 _perform_for_admin_only=False)
        admin = mock.MagicMock()
        publish = manager.SeekAndDestroy(mock_mgr, admin, None)._publisher
        queue = []
        publish(queue)
        mock__get_cached_client.assert_called_once_with(admin)
        mock_mgr.assert_called_once_with(
            admin=mock__get_cached_client.return_value)
        self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])

    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    def test__publisher_admin_only(self, mock__get_cached_client):
        mock_mgr = self._manager([Exception, Exception, [1, 2, 3]],
                                 _perform_for_admin_only=True)
        admin = mock.MagicMock()
        publish = manager.SeekAndDestroy(
            mock_mgr, admin, ["u1", "u2"])._publisher
        queue = []
        publish(queue)
        mock__get_cached_client.assert_called_once_with(admin)
        mock_mgr.assert_called_once_with(
            admin=mock__get_cached_client.return_value)
        self.assertEqual(queue, [(admin, None, x) for x in range(1, 4)])

    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    def test__publisher_user_resource(self, mock__get_cached_client):
        mock_mgr = self._manager([Exception, Exception, [1, 2, 3],
                                  Exception, Exception, [4, 5]],
                                 _perform_for_admin_only=False,
                                 _tenant_resource=True)
        admin = mock.MagicMock()
        users = [{"tenant_id": 1, "id": 1}, {"tenant_id": 2, "id": 2}]
        publish = manager.SeekAndDestroy(mock_mgr, admin, users)._publisher
        queue = []
        publish(queue)
        mock_client = mock__get_cached_client.return_value
        mock_mgr.assert_has_calls([
            mock.call(admin=mock_client, user=mock_client,
                      tenant_uuid=users[0]["tenant_id"]),
            mock.call().list(),
            mock.call().list(),
            mock.call().list(),
            mock.call(admin=mock_client, user=mock_client,
                      tenant_uuid=users[1]["tenant_id"]),
            mock.call().list(),
            mock.call().list()
        ])
        mock__get_cached_client.assert_has_calls([
            mock.call(admin),
            mock.call(users[0]),
            mock.call(users[1])
        ])
        expected_queue = [(admin, users[0], x) for x in range(1, 4)]
        expected_queue += [(admin, users[1], x) for x in range(4, 6)]
        self.assertEqual(expected_queue, queue)

    @mock.patch("%s.LOG" % BASE)
    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    def test__gen_publisher_tenant_resource(self, mock__get_cached_client,
                                            mock_log):
        # tenant 2's listing fails on all attempts -> nothing published
        # for it, only a warning/exception logged
        mock_mgr = self._manager([Exception, [1, 2, 3],
                                  Exception, Exception, Exception,
                                  ["this shouldn't be in results"]],
                                 _perform_for_admin_only=False,
                                 _tenant_resource=True)
        users = [{"tenant_id": 1, "id": 1},
                 {"tenant_id": 1, "id": 2},
                 {"tenant_id": 2, "id": 3}]
        publish = manager.SeekAndDestroy(
            mock_mgr, None, users)._publisher
        queue = []
        publish(queue)
        mock_client = mock__get_cached_client.return_value
        mock_mgr.assert_has_calls([
            mock.call(admin=mock_client, user=mock_client,
                      tenant_uuid=users[0]["tenant_id"]),
            mock.call().list(),
            mock.call().list(),
            mock.call(admin=mock_client, user=mock_client,
                      tenant_uuid=users[2]["tenant_id"]),
            mock.call().list(),
            mock.call().list(),
            mock.call().list()
        ])
        mock__get_cached_client.assert_has_calls([
            mock.call(None),
            mock.call(users[0]),
            mock.call(users[2])
        ])
        self.assertEqual(queue, [(None, users[0], x) for x in range(1, 4)])
        # Bug fix: these previously read ``.mock_called``, which is not a
        # real Mock attribute -- accessing it on a MagicMock auto-creates
        # a truthy child mock, so assertTrue() passed unconditionally.
        # The actual attribute that records invocation is ``.called``.
        self.assertTrue(mock_log.warning.called)
        self.assertTrue(mock_log.exception.called)

    @mock.patch("rally.common.utils.name_matches_object")
    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    @mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
    def test__consumer(self, mock__delete_single_resource,
                       mock__get_cached_client,
                       mock_name_matches_object):
        mock_mgr = mock.MagicMock(__name__="Test")
        resource_classes = [mock.Mock()]
        task_id = "task_id"
        mock_name_matches_object.return_value = True
        consumer = manager.SeekAndDestroy(
            mock_mgr, None, None,
            resource_classes=resource_classes,
            task_id=task_id)._consumer
        admin = mock.MagicMock()
        user1 = {"id": "a", "tenant_id": "uuid1"}
        cache = {}
        # tenant-owned resource: user client + tenant uuid are passed
        consumer(cache, (admin, user1, "res"))
        mock_mgr.assert_called_once_with(
            resource="res",
            admin=mock__get_cached_client.return_value,
            user=mock__get_cached_client.return_value,
            tenant_uuid=user1["tenant_id"])
        mock__get_cached_client.assert_has_calls([
            mock.call(admin),
            mock.call(user1)
        ])
        mock__delete_single_resource.assert_called_once_with(
            mock_mgr.return_value)
        mock_mgr.reset_mock()
        mock__get_cached_client.reset_mock()
        mock__delete_single_resource.reset_mock()
        mock_name_matches_object.reset_mock()
        # admin-only resource: no user, no tenant uuid
        consumer(cache, (admin, None, "res2"))
        mock_mgr.assert_called_once_with(
            resource="res2",
            admin=mock__get_cached_client.return_value,
            user=mock__get_cached_client.return_value,
            tenant_uuid=None)
        mock__get_cached_client.assert_has_calls([
            mock.call(admin),
            mock.call(None)
        ])
        mock__delete_single_resource.assert_called_once_with(
            mock_mgr.return_value)

    @mock.patch("rally.common.utils.name_matches_object")
    @mock.patch("%s.SeekAndDestroy._get_cached_client" % BASE)
    @mock.patch("%s.SeekAndDestroy._delete_single_resource" % BASE)
    def test__consumer_with_noname_resource(self, mock__delete_single_resource,
                                            mock__get_cached_client,
                                            mock_name_matches_object):
        mock_mgr = mock.MagicMock(__name__="Test")
        mock_mgr.return_value.name.return_value = True
        task_id = "task_id"
        mock_name_matches_object.return_value = False
        consumer = manager.SeekAndDestroy(mock_mgr, None, None,
                                          task_id=task_id)._consumer
        # named resource whose name doesn't match -> skipped
        consumer(None, (None, None, "res"))
        self.assertFalse(mock__delete_single_resource.called)
        # NoName resources are always deleted regardless of name matching
        mock_mgr.return_value.name.return_value = base.NoName("foo")
        consumer(None, (None, None, "res"))
        mock__delete_single_resource.assert_called_once_with(
            mock_mgr.return_value)

    @mock.patch("%s.broker.run" % BASE)
    def test_exterminate(self, mock_broker_run):
        manager_cls = mock.MagicMock(_threads=5)
        cleaner = manager.SeekAndDestroy(manager_cls, None, None)
        cleaner._publisher = mock.Mock()
        cleaner._consumer = mock.Mock()
        cleaner.exterminate()
        mock_broker_run.assert_called_once_with(cleaner._publisher,
                                                cleaner._consumer,
                                                consumers_count=5)
class ResourceManagerTestCase(test.TestCase):
    """Tests for the module-level helpers: list_resource_names,
    find_resource_managers and cleanup.
    """
    def _get_res_mock(self, **kw):
        # Build a MagicMock that mimics a ResourceManager subclass;
        # kwargs become class attributes (_service, _resource, ...).
        _mock = mock.MagicMock()
        for k, v in kw.items():
            setattr(_mock, k, v)
        return _mock
    def _list_res_names_helper(self, names, admin_required, mock_iter):
        # Assert list_resource_names(admin_required) returns exactly
        # `names` and rediscovers managers via itersubclasses each call.
        self.assertEqual(set(names),
                         manager.list_resource_names(admin_required))
        mock_iter.assert_called_once_with(base.ResourceManager)
        mock_iter.reset_mock()
    @mock.patch("%s.discover.itersubclasses" % BASE)
    def test_list_resource_names(self, mock_itersubclasses):
        mock_itersubclasses.return_value = [
            self._get_res_mock(_service="fake", _resource="1",
                               _admin_required=True),
            self._get_res_mock(_service="fake", _resource="2",
                               _admin_required=False),
            self._get_res_mock(_service="other", _resource="2",
                               _admin_required=False)
        ]
        # admin_required=None -> all services and service.resource names
        self._list_res_names_helper(
            ["fake", "other", "fake.1", "fake.2", "other.2"],
            None, mock_itersubclasses)
        # admin_required=True -> only admin-only managers
        self._list_res_names_helper(
            ["fake", "fake.1"],
            True, mock_itersubclasses)
        # admin_required=False -> only non-admin managers
        self._list_res_names_helper(
            ["fake", "other", "fake.2", "other.2"],
            False, mock_itersubclasses)
    @mock.patch("%s.discover.itersubclasses" % BASE)
    def test_find_resource_managers(self, mock_itersubclasses):
        mock_itersubclasses.return_value = [
            self._get_res_mock(_service="fake", _resource="1", _order=1,
                               _admin_required=True),
            self._get_res_mock(_service="fake", _resource="2", _order=3,
                               _admin_required=False),
            self._get_res_mock(_service="other", _resource="2", _order=2,
                               _admin_required=False)
        ]
        # match by service name
        self.assertEqual(mock_itersubclasses.return_value[0:2],
                         manager.find_resource_managers(names=["fake"]))
        # match by fully-qualified service.resource name
        self.assertEqual(mock_itersubclasses.return_value[0:1],
                         manager.find_resource_managers(names=["fake.1"]))
        # results come back sorted by _order (1, 2, 3)
        self.assertEqual(
            [mock_itersubclasses.return_value[0],
             mock_itersubclasses.return_value[2],
             mock_itersubclasses.return_value[1]],
            manager.find_resource_managers(names=["fake", "other"]))
        self.assertEqual(mock_itersubclasses.return_value[0:1],
                         manager.find_resource_managers(names=["fake"],
                                                        admin_required=True))
        self.assertEqual(mock_itersubclasses.return_value[1:2],
                         manager.find_resource_managers(names=["fake"],
                                                        admin_required=False))
    @mock.patch("rally.common.plugin.discover.itersubclasses")
    @mock.patch("%s.SeekAndDestroy" % BASE)
    @mock.patch("%s.find_resource_managers" % BASE,
                return_value=[mock.MagicMock(), mock.MagicMock()])
    def test_cleanup(self, mock_find_resource_managers, mock_seek_and_destroy,
                     mock_itersubclasses):
        # A has random-name support, B does not; only A must be passed
        # through to SeekAndDestroy as a resource class.
        class A(utils.RandomNameGeneratorMixin):
            pass
        class B(object):
            pass
        mock_itersubclasses.return_value = [A, B]
        manager.cleanup(names=["a", "b"], admin_required=True,
                        admin="admin", users=["user"],
                        superclass=A,
                        task_id="task_id")
        mock_find_resource_managers.assert_called_once_with(["a", "b"], True)
        # one SeekAndDestroy per discovered manager, each exterminated
        mock_seek_and_destroy.assert_has_calls([
            mock.call(mock_find_resource_managers.return_value[0],
                      "admin",
                      ["user"],
                      resource_classes=[A],
                      task_id="task_id"),
            mock.call().exterminate(),
            mock.call(mock_find_resource_managers.return_value[1],
                      "admin",
                      ["user"],
                      resource_classes=[A],
                      task_id="task_id"),
            mock.call().exterminate()
        ])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,707
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/cinder/qos_specs.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
"""Scenarios for Cinder QoS."""
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderQos.create_and_list_qos", platform="openstack")
class CreateAndListQos(cinder_utils.CinderBasic):

    def run(self, consumer, write_iops_sec, read_iops_sec):
        """Create a qos, then list all qos.

        :param consumer: Consumer behavior
        :param write_iops_sec: random write limitation
        :param read_iops_sec: random read limitation
        """
        qos = self.admin_cinder.create_qos({
            "consumer": consumer,
            "write_iops_sec": write_iops_sec,
            "read_iops_sec": read_iops_sec,
        })
        pool_list = self.admin_cinder.list_qos()
        # the freshly created qos must show up in the listing
        msg = ("Qos not included into list of available qos\n"
               "created qos:{}\n"
               "Pool of qos:{}").format(qos, pool_list)
        self.assertIn(qos, pool_list, err_msg=msg)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderQos.create_and_get_qos", platform="openstack")
class CreateAndGetQos(cinder_utils.CinderBasic):

    def run(self, consumer, write_iops_sec, read_iops_sec):
        """Create a qos, then get details of the qos.

        :param consumer: Consumer behavior
        :param write_iops_sec: random write limitation
        :param read_iops_sec: random read limitation
        """
        created = self.admin_cinder.create_qos({
            "consumer": consumer,
            "write_iops_sec": write_iops_sec,
            "read_iops_sec": read_iops_sec,
        })
        # fetch the details of what we just created
        self.admin_cinder.get_qos(created.id)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["cinder"]},
                    name="CinderQos.create_and_set_qos", platform="openstack")
class CreateAndSetQos(cinder_utils.CinderBasic):

    def run(self, consumer, write_iops_sec, read_iops_sec,
            set_consumer, set_write_iops_sec, set_read_iops_sec):
        """Create a qos, then Add/Update keys in qos specs.

        :param consumer: Consumer behavior
        :param write_iops_sec: random write limitation
        :param read_iops_sec: random read limitation
        :param set_consumer: update Consumer behavior
        :param set_write_iops_sec: update random write limitation
        :param set_read_iops_sec: update random read limitation
        """
        qos = self.admin_cinder.create_qos({
            "consumer": consumer,
            "write_iops_sec": write_iops_sec,
            "read_iops_sec": read_iops_sec,
        })
        updated_specs = {
            "consumer": set_consumer,
            "write_iops_sec": set_write_iops_sec,
            "read_iops_sec": set_read_iops_sec,
        }
        self.admin_cinder.set_qos(qos=qos, set_specs_args=updated_specs)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", admin=True)
# Bug fix: the original passed contexts=("volume_types") -- a parenthesized
# *string* (missing trailing comma), not a tuple.  The validator happens to
# accept a bare string, but the intent is a one-element tuple; make it one.
@validation.add("required_contexts", contexts=("volume_types",))
@scenario.configure(
    context={"admin_cleanup@openstack": ["cinder"]},
    name="CinderQos.create_qos_associate_and_disassociate_type",
    platform="openstack")
class CreateQosAssociateAndDisassociateType(cinder_utils.CinderBasic):

    def run(self, consumer, write_iops_sec, read_iops_sec):
        """Create a qos, Associate and Disassociate the qos from volume type.

        :param consumer: Consumer behavior
        :param write_iops_sec: random write limitation
        :param read_iops_sec: random read limitation
        """
        specs = {
            "consumer": consumer,
            "write_iops_sec": write_iops_sec,
            "read_iops_sec": read_iops_sec
        }
        qos = self.admin_cinder.create_qos(specs)
        # round-robin over the volume types prepared by the context
        vt_idx = self.context["iteration"] % len(self.context["volume_types"])
        volume_type = self.context["volume_types"][vt_idx]
        self.admin_cinder.qos_associate_type(qos_specs=qos,
                                             volume_type=volume_type["id"])
        self.admin_cinder.qos_disassociate_type(qos_specs=qos,
                                                volume_type=volume_type["id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,708
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/magnum/clusters.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.magnum import utils as magnum_utils
from rally_openstack.task.scenarios.nova import utils as nova_utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="clusters", platform="openstack", order=480)
class ClusterGenerator(context.OpenStackContext):
    """Creates specified amount of Magnum clusters."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "cluster_template_uuid": {
                "type": "string"
            },
            "node_count": {
                "type": "integer",
                "minimum": 1,
            },
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {"node_count": 1}

    def setup(self):
        """Create one keypair and one Magnum cluster per tenant."""
        for user, tenant_id in self._iterate_per_tenants():
            common = {
                "user": user,
                "task": self.context["task"],
                "owner_id": self.context["owner_id"],
            }
            nova_scenario = nova_utils.NovaScenario(dict(
                common,
                config={"api_versions": self.context["config"].get(
                    "api_versions", [])}))
            keypair = nova_scenario._create_keypair()
            magnum_scenario = magnum_utils.MagnumScenario(dict(
                common,
                config={"api_versions": self.context["config"].get(
                    "api_versions", [])}))
            # create a cluster; fall back to the per-tenant template
            # created by the cluster_templates context when no explicit
            # template uuid was configured
            ct_uuid = self.config.get("cluster_template_uuid", None)
            if ct_uuid is None:
                ct_uuid = self.context["tenants"][tenant_id].get(
                    "cluster_template")
            cluster = magnum_scenario._create_cluster(
                cluster_template=ct_uuid,
                node_count=self.config.get("node_count"), keypair=keypair)
            self.context["tenants"][tenant_id]["cluster"] = cluster.uuid

    def cleanup(self):
        """Delete the clusters and keypairs created by setup()."""
        resource_manager.cleanup(
            names=["magnum.clusters", "nova.keypairs"],
            users=self.context.get("users", []),
            superclass=magnum_utils.MagnumScenario,
            task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,709
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/designate/zones.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.designate import utils
from rally_openstack.task.scenarios.neutron import utils as neutron_utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="zones", platform="openstack", order=600)
class ZoneGenerator(context.OpenStackContext):
    """Context to add `zones_per_tenant` zones for each tenant."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "zones_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "set_zone_in_network": {
                "type": "boolean",
                "description": "Update network with created DNS zone."
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "zones_per_tenant": 1,
        "set_zone_in_network": False
    }

    def setup(self):
        """Create DNS zones and optionally bind them to tenant networks."""
        users = self.context["users"]
        for user, tenant_id in self._iterate_per_tenants(users):
            tenant = self.context["tenants"][tenant_id]
            tenant.setdefault("zones", [])
            designate_util = utils.DesignateScenario(
                {"user": user,
                 "task": self.context["task"],
                 "owner_id": self.context["owner_id"]})
            tenant["zones"].extend(
                designate_util._create_zone()
                for _ in range(self.config["zones_per_tenant"]))
        if self.config["set_zone_in_network"]:
            # point each tenant's first network at its first zone
            # (assumes the "network" context already populated
            # tenant["networks"] -- verified by the schema's description)
            for user, tenant_id in self._iterate_per_tenants(users):
                tenant = self.context["tenants"][tenant_id]
                scenario = neutron_utils.NeutronScenario(
                    context={"user": user, "task": self.context["task"],
                             "owner_id": self.context["owner_id"]}
                )
                body = {"network": {
                    "dns_domain": tenant["zones"][0]["name"]
                }}
                scenario.clients("neutron").update_network(
                    tenant["networks"][0]["id"], body)

    def cleanup(self):
        """Delete the zones created by setup()."""
        resource_manager.cleanup(names=["designate.zones"],
                                 users=self.context.get("users", []),
                                 superclass=utils.DesignateScenario,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,710
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/vm/custom_image.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from rally.common import broker
from rally.common import logging
from rally.common import utils
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.image import image
from rally_openstack.task import context
from rally_openstack.task.scenarios.vm import vmtasks
from rally_openstack.task import types
LOG = logging.getLogger(__name__)
class BaseCustomImageGenerator(context.OpenStackContext,
                               metaclass=abc.ABCMeta):
    """Base plugin for the contexts providing customized image with.
    Every context plugin for the specific customization must implement
    the method `_customize_image` that is able to connect to the server
    using SSH and install applications inside it.
    This base context plugin provides a way to prepare an image with
    custom preinstalled applications. Basically, this code boots a VM, calls
    the `_customize_image` and then snapshots the VM disk, removing the VM
    afterwards. The image UUID is stored in the user["custom_image"]["id"]
    and can be used afterwards by scenario.
    """
    # JSON schema of the context configuration; validated by Rally before
    # setup() runs. "image" and "flavor" pick the base VM; "workers"
    # controls the broker fan-out in setup()/cleanup().
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "image": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                },
                "additionalProperties": False
            },
            "flavor": {
                "type": "object",
                "properties": {
                    "name": {
                        "type": "string"
                    }
                },
                "additionalProperties": False
            },
            "username": {
                "type": "string"
            },
            "password": {
                "type": "string"
            },
            "floating_network": {
                "type": "string"
            },
            "internal_network": {
                "type": "string"
            },
            "port": {
                "type": "integer",
                "minimum": 1,
                "maximum": 65535
            },
            "userdata": {
                "type": "string"
            },
            "workers": {
                "type": "integer",
                "minimum": 1,
            }
        },
        "required": ["image", "flavor"],
        "additionalProperties": False
    }
    # Defaults merged into the user-supplied config for missing keys.
    DEFAULT_CONFIG = {
        "username": "root",
        "port": 22,
        "workers": 1
    }
    def setup(self):
        """Creates custom image(s) with preinstalled applications.
        When admin is present creates one public image that is usable
        from all the tenants and users. Otherwise create one image
        per user and tenant.
        """
        if "admin" in self.context:
            if self.context["users"]:
                # NOTE(pboldin): Create by first user and make it public by
                # the admin
                user = self.context["users"][0]
            else:
                user = self.context["admin"]
            tenant = self.context["tenants"][user["tenant_id"]]
            nics = None
            if "networks" in tenant:
                # Boot on the tenant's first network when one was prepared
                # by a network context.
                nics = [{"net-id": tenant["networks"][0]["id"]}]
            custom_image = self.create_one_image(user, nics=nics)
            # Use admin credentials to make the single image visible to
            # every tenant, then share the same image object with all of
            # them.
            glance_service = image.Image(
                self.context["admin"]["credential"].clients())
            glance_service.set_visibility(custom_image.id)
            for tenant in self.context["tenants"].values():
                tenant["custom_image"] = custom_image
        else:
            # No admin available: build one image per tenant, fanned out
            # over self.config["workers"] consumers via the broker.
            def publish(queue):
                for user, tenant_id in self._iterate_per_tenants():
                    queue.append((user, tenant_id))
            def consume(cache, args):
                user, tenant_id = args
                tenant = self.context["tenants"][tenant_id]
                tenant["custom_image"] = self.create_one_image(user)
            broker.run(publish, consume, self.config["workers"])
    def create_one_image(self, user, **kwargs):
        """Create one image for the user."""
        clients = osclients.Clients(user["credential"])
        # Resolve the image/flavor specs from the config (e.g. names) into
        # concrete IDs.
        image_id = types.GlanceImage(self.context).pre_process(
            resource_spec=self.config["image"], config={})
        flavor_id = types.Flavor(self.context).pre_process(
            resource_spec=self.config["flavor"], config={})
        vm_scenario = vmtasks.BootRuncommandDelete(self.context,
                                                   clients=clients)
        # Boot a throwaway server with a floating IP so that
        # customize_image() can reach it over SSH.
        server, fip = vm_scenario._boot_server_with_fip(
            image=image_id, flavor=flavor_id,
            floating_network=self.config.get("floating_network"),
            userdata=self.config.get("userdata"),
            key_name=user["keypair"]["name"],
            security_groups=[user["secgroup"]["name"]],
            **kwargs)
        try:
            LOG.debug("Installing tools on %r %s" % (server, fip["ip"]))
            self.customize_image(server, fip, user)
            LOG.debug("Stopping server %r" % server)
            # Stop the VM before snapshotting so the disk is quiescent.
            vm_scenario._stop_server(server)
            LOG.debug("Creating snapshot for %r" % server)
            custom_image = vm_scenario._create_image(server)
        finally:
            # The booted VM is only a build host; always remove it (and its
            # floating IP), even when customization failed.
            vm_scenario._delete_server_with_fip(server, fip)
        return custom_image
    def cleanup(self):
        """Delete created custom image(s)."""
        if "admin" in self.context:
            # Single shared image: setup() created it via the first user,
            # so delete it on that user's behalf; pop it from only one
            # tenant entry (all tenants reference the same object).
            user = self.context["users"][0]
            tenant = self.context["tenants"][user["tenant_id"]]
            if "custom_image" in tenant:
                self.delete_one_image(user, tenant["custom_image"])
                tenant.pop("custom_image")
        else:
            # One image per tenant: delete concurrently, mirroring the
            # fan-out used in setup().
            def publish(queue):
                users = self.context.get("users", [])
                # NOTE(review): setup() iterates with
                # self._iterate_per_tenants() while cleanup uses
                # utils.iterate_per_tenants(users) -- presumably
                # equivalent; confirm before unifying.
                for user, tenant_id in utils.iterate_per_tenants(users):
                    queue.append((user, tenant_id))
            def consume(cache, args):
                user, tenant_id = args
                tenant = self.context["tenants"][tenant_id]
                if "custom_image" in tenant:
                    self.delete_one_image(user, tenant["custom_image"])
                    tenant.pop("custom_image")
            broker.run(publish, consume, self.config["workers"])
    def delete_one_image(self, user, custom_image):
        """Delete the image created for the user and tenant."""
        # Log-and-continue on failure so cleanup of the remaining tenants
        # is not aborted by one broken image.
        with logging.ExceptionLogger(
                LOG, "Unable to delete image %s" % custom_image.id):
            glance_service = image.Image(user["credential"].clients())
            glance_service.delete_image(custom_image.id)
    @logging.log_task_wrapper(LOG.info, "Custom image context: customizing")
    def customize_image(self, server, ip, user):
        # Thin logged wrapper around the subclass-provided hook.
        return self._customize_image(server, ip, user)
    @abc.abstractmethod
    def _customize_image(self, server, ip, user):
        """Override this method with one that customizes image.
        Basically, code can simply call `VMScenario._run_command` function
        specifying an installation script and interpreter. This script will
        be then executed using SSH.
        :param server: nova.Server instance
        :param ip: dict with server IP details
        :param user: user who started a VM instance. Used to extract keypair
        """
        pass
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,711
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/zaqar/basic.py
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import logging
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.zaqar import utils as zutils
"""Scenarios for Zaqar."""
@scenario.configure(context={"cleanup@openstack": ["zaqar"]},
                    name="ZaqarBasic.create_queue", platform="openstack")
class CreateQueue(zutils.ZaqarScenario):
    @logging.log_deprecated_args(
        "The 'name_length' argument to create_queue is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=None, **kwargs):
        """Create a Zaqar queue with a random name.
        :param name_length: deprecated since 0.1.2 and ignored; the queue
                            name is generated automatically
        :param kwargs: other optional parameters to create queues like
                       "metadata"
        """
        # The queue name comes from the scenario's random-name generator;
        # only the extra kwargs are forwarded.
        self._queue_create(**kwargs)
@scenario.configure(context={"cleanup@openstack": ["zaqar"]},
                    name="ZaqarBasic.producer_consumer", platform="openstack")
class ProducerConsumer(zutils.ZaqarScenario):
    @logging.log_deprecated_args(
        "The 'name_length' argument to producer_consumer is ignored",
        "0.1.2", ["name_length"], once=True)
    def run(self, name_length=None,
            min_msg_count=50, max_msg_count=200, **kwargs):
        """Serial message producer/consumer.
        Creates a Zaqar queue with a random name, posts a randomly-sized
        batch of messages to it, lists them back and finally removes the
        queue.
        :param name_length: deprecated since 0.1.2 and ignored
        :param min_msg_count: min number of messages to be posted
        :param max_msg_count: max number of messages to be posted
        :param kwargs: other optional parameters to create queues like
                       "metadata"
        """
        created_queue = self._queue_create(**kwargs)
        # Pick the batch size at random within the configured bounds.
        total = random.randint(min_msg_count, max_msg_count)
        batch = []
        for seq in range(total):
            batch.append({"body": {"id": seq}, "ttl": 360})
        self._messages_post(created_queue, batch,
                            min_msg_count, max_msg_count)
        self._messages_list(created_queue)
        self._queue_delete(created_queue)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,712
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/nova/keypairs.py
|
# Copyright 2015: Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import types
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils
"""Scenarios for Nova keypairs."""
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaKeypair.create_and_list_keypairs",
                    platform="openstack")
class CreateAndListKeypairs(utils.NovaScenario):
    def run(self, **kwargs):
        """Create a keypair with random name and list keypairs.
        This scenario creates a keypair and then verifies that the new
        keypair shows up in the keypair listing.
        :param kwargs: Optional additional arguments for keypair creation
        """
        created_name = self._create_keypair(**kwargs)
        self.assertTrue(created_name, "Keypair isn't created")
        all_keypairs = self._list_keypairs()
        listed_ids = [keypair.id for keypair in all_keypairs]
        self.assertIn(created_name, listed_ids)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaKeypair.create_and_delete_keypair",
                    platform="openstack")
class CreateAndDeleteKeypair(utils.NovaScenario):
    def run(self, **kwargs):
        """Create a keypair with random name and delete keypair.
        This scenario creates a keypair and then immediately deletes it.
        :param kwargs: Optional additional arguments for keypair creation
        """
        created_keypair = self._create_keypair(**kwargs)
        self._delete_keypair(created_keypair)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaKeypair.boot_and_delete_server_with_keypair",
                    platform="openstack")
class BootAndDeleteServerWithKeypair(utils.NovaScenario):
    def run(self, image, flavor, boot_server_kwargs=None, **kwargs):
        """Boot and delete server with keypair.
        Plan of this scenario:
        - create a keypair
        - boot a VM with created keypair
        - delete server
        - delete keypair
        :param image: ID of the image to be used for server creation
        :param flavor: ID of the flavor to be used for server creation
        :param boot_server_kwargs: Optional additional arguments for VM
                                   creation
        :param kwargs: Optional additional arguments for keypair creation
        """
        # Normalize the optional dict up-front instead of relying on the
        # confusing "**(boot_server_kwargs) or {}" spelling, which only
        # worked because "**" binds over the whole "or" expression.
        boot_server_kwargs = boot_server_kwargs or {}
        keypair = self._create_keypair(**kwargs)
        server = self._boot_server(image, flavor,
                                   key_name=keypair,
                                   **boot_server_kwargs)
        self._delete_server(server)
        self._delete_keypair(keypair)
@validation.add("required_services", services=[consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["nova"]},
                    name="NovaKeypair.create_and_get_keypair",
                    platform="openstack")
class CreateAndGetKeypair(utils.NovaScenario):
    def run(self, **kwargs):
        """Create a keypair and get the keypair details.
        :param kwargs: Optional additional arguments for keypair creation
        """
        # Create first, then fetch the details of the freshly created key.
        new_keypair = self._create_keypair(**kwargs)
        self._get_keypair(new_keypair)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,713
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/senlin/test_utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally.common import cfg
from rally import exceptions
from rally_openstack.task.scenarios.senlin import utils
from tests.unit import test
SENLIN_UTILS = "rally_openstack.task.scenarios.senlin.utils."
CONF = cfg.CONF
class SenlinScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the Senlin scenario utility helpers."""
    def test_list_cluster(self):
        # _list_clusters() must materialize whatever iterable the client
        # returns into a plain list.
        fake_cluster_list = ["cluster1", "cluster2"]
        self.admin_clients("senlin").clusters.return_value = fake_cluster_list
        scenario = utils.SenlinScenario(self.context)
        result = scenario._list_clusters()
        self.assertEqual(list(fake_cluster_list), result)
        self.admin_clients("senlin").clusters.assert_called_once_with()
    def test_list_cluster_with_queries(self):
        # Query kwargs are forwarded verbatim to the senlin client.
        fake_cluster_list = ["cluster1", "cluster2"]
        self.admin_clients("senlin").clusters.return_value = fake_cluster_list
        scenario = utils.SenlinScenario(self.context)
        result = scenario._list_clusters(status="ACTIVE")
        self.assertEqual(list(fake_cluster_list), result)
        self.admin_clients("senlin").clusters.assert_called_once_with(
            status="ACTIVE")
    @mock.patch(SENLIN_UTILS + "SenlinScenario.generate_random_name",
                return_value="test_cluster")
    def test_create_cluster(self, mock_generate_random_name):
        # _create_cluster() should use a generated name, wait for the
        # cluster to become ACTIVE and record the atomic action timing.
        fake_cluster = mock.Mock(id="fake_cluster_id")
        res_cluster = mock.Mock()
        self.admin_clients("senlin").create_cluster.return_value = fake_cluster
        self.mock_wait_for_status.mock.return_value = res_cluster
        scenario = utils.SenlinScenario(self.context)
        result = scenario._create_cluster("fake_profile_id",
                                          desired_capacity=1,
                                          min_size=0,
                                          max_size=3,
                                          metadata={"k1": "v1"},
                                          timeout=60)
        self.assertEqual(res_cluster, result)
        self.admin_clients("senlin").create_cluster.assert_called_once_with(
            profile_id="fake_profile_id", name="test_cluster",
            desired_capacity=1, min_size=0, max_size=3, metadata={"k1": "v1"},
            timeout=60)
        self.mock_wait_for_status.mock.assert_called_once_with(
            fake_cluster, ready_statuses=["ACTIVE"],
            failure_statuses=["ERROR"],
            update_resource=scenario._get_cluster,
            timeout=CONF.openstack.senlin_action_timeout)
        mock_generate_random_name.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "senlin.create_cluster")
    def test_get_cluster(self):
        # The lookup is performed by cluster id.
        fake_cluster = mock.Mock(id="fake_cluster_id")
        scenario = utils.SenlinScenario(context=self.context)
        scenario._get_cluster(fake_cluster)
        self.admin_clients("senlin").get_cluster.assert_called_once_with(
            "fake_cluster_id")
    def test_get_cluster_notfound(self):
        # A client error with code 404 is translated to
        # GetResourceNotFound.
        fake_cluster = mock.Mock(id="fake_cluster_id")
        ex = Exception()
        ex.code = 404
        self.admin_clients("senlin").get_cluster.side_effect = ex
        scenario = utils.SenlinScenario(context=self.context)
        self.assertRaises(exceptions.GetResourceNotFound,
                          scenario._get_cluster,
                          fake_cluster)
        self.admin_clients("senlin").get_cluster.assert_called_once_with(
            "fake_cluster_id")
    def test_get_cluster_failed(self):
        # Any other error code is translated to GetResourceFailure.
        fake_cluster = mock.Mock(id="fake_cluster_id")
        ex = Exception()
        ex.code = 500
        self.admin_clients("senlin").get_cluster.side_effect = ex
        scenario = utils.SenlinScenario(context=self.context)
        self.assertRaises(exceptions.GetResourceFailure,
                          scenario._get_cluster,
                          fake_cluster)
        self.admin_clients("senlin").get_cluster.assert_called_once_with(
            "fake_cluster_id")
    def test_delete_cluster(self):
        # Deletion waits for the DELETED status with deletion-aware checks
        # and records the atomic action timing.
        fake_cluster = mock.Mock()
        scenario = utils.SenlinScenario(context=self.context)
        scenario._delete_cluster(fake_cluster)
        self.admin_clients("senlin").delete_cluster.assert_called_once_with(
            fake_cluster)
        self.mock_wait_for_status.mock.assert_called_once_with(
            fake_cluster, ready_statuses=["DELETED"],
            failure_statuses=["ERROR"], check_deletion=True,
            update_resource=scenario._get_cluster,
            timeout=CONF.openstack.senlin_action_timeout)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "senlin.delete_cluster")
    @mock.patch(SENLIN_UTILS + "SenlinScenario.generate_random_name",
                return_value="test_profile")
    def test_create_profile(self, mock_generate_random_name):
        # Profiles are created through the regular (non-admin) client.
        test_spec = {
            "version": "1.0",
            "type": "test_type",
            "properties": {
                "key1": "value1"
            }
        }
        scenario = utils.SenlinScenario(self.context)
        result = scenario._create_profile(test_spec, metadata={"k2": "v2"})
        self.assertEqual(
            self.clients("senlin").create_profile.return_value, result)
        self.clients("senlin").create_profile.assert_called_once_with(
            spec=test_spec, name="test_profile", metadata={"k2": "v2"})
        mock_generate_random_name.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "senlin.create_profile")
    def test_delete_profile(self):
        # The whole profile object is passed through to the client.
        fake_profile = mock.Mock()
        scenario = utils.SenlinScenario(context=self.context)
        scenario._delete_profile(fake_profile)
        self.clients("senlin").delete_profile.assert_called_once_with(
            fake_profile)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "senlin.delete_profile")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,714
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/wrappers/test_network.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from neutronclient.common import exceptions as neutron_exceptions
from rally.common import utils
from rally_openstack.common import consts
from rally_openstack.common.wrappers import network
from tests.unit import test
SVC = "rally_openstack.common.wrappers.network."
class Owner(utils.RandomNameGeneratorMixin):
    """Minimal stand-in for a resource owner (provides task uuid + names)."""
    # RandomNameGeneratorMixin reads the task uuid when generating names.
    task = {"uuid": "task-uuid"}
@ddt.ddt
class NeutronWrapperTestCase(test.TestCase):
    def setUp(self):
        """Build a NeutronWrapper around fully mocked clients."""
        super(NeutronWrapperTestCase, self).setUp()
        self.owner = Owner()
        self.owner.generate_random_name = mock.Mock()
        clients = mock.MagicMock()
        clients.credential.permission = consts.EndpointPermission.ADMIN
        self.wrapper = network.NeutronWrapper(
            clients, self.owner, config={})
        # Shortcut to the underlying (mocked) neutron client that most of
        # the assertions below are made against.
        self._nc = self.wrapper.neutron.client
    def test_SUBNET_IP_VERSION(self):
        # The wrapper creates IPv4 subnets by default.
        self.assertEqual(4, network.NeutronWrapper.SUBNET_IP_VERSION)
    @mock.patch(
        "rally_openstack.common.services.network.net_utils.generate_cidr")
    def test__generate_cidr(self, mock_generate_cidr):
        # net_utils.generate_cidr returns (ip_version, cidr); the wrapper
        # must discard the version, return only the cidr part and pass its
        # start_cidr setting through on every call.
        cidrs = iter(range(5))
        def fake_gen_cidr(ip_version=None, start_cidr=None):
            return 4, 3 + next(cidrs)
        mock_generate_cidr.side_effect = fake_gen_cidr
        self.assertEqual(3, self.wrapper._generate_cidr())
        self.assertEqual(4, self.wrapper._generate_cidr())
        self.assertEqual(5, self.wrapper._generate_cidr())
        self.assertEqual(6, self.wrapper._generate_cidr())
        self.assertEqual(7, self.wrapper._generate_cidr())
        self.assertEqual([mock.call(start_cidr=self.wrapper.start_cidr)] * 5,
                         mock_generate_cidr.call_args_list)
    def test_external_networks(self):
        # The property lists networks filtered by the "router:external"
        # flag.
        self._nc.list_networks.return_value = {"networks": "foo_networks"}
        self.assertEqual("foo_networks", self.wrapper.external_networks)
        self._nc.list_networks.assert_called_once_with(
            **{"router:external": True})
    def test_get_network(self):
        # Exercises all four paths of get_network: lookup by id (success
        # and client error) and lookup by name (found and not found). The
        # neutron "router:external" key must be renamed to "external" and
        # a "router_id" key added in the wrapper's dict.
        neutron_net = {"id": "foo_id",
                       "name": "foo_name",
                       "tenant_id": "foo_tenant",
                       "status": "foo_status",
                       "router:external": "foo_external",
                       "subnets": "foo_subnets"}
        expected_net = {"id": "foo_id",
                        "name": "foo_name",
                        "tenant_id": "foo_tenant",
                        "status": "foo_status",
                        "external": "foo_external",
                        "router_id": None,
                        "subnets": "foo_subnets"}
        self._nc.show_network.return_value = {"network": neutron_net}
        net = self.wrapper.get_network(net_id="foo_id")
        self.assertEqual(expected_net, net)
        self._nc.show_network.assert_called_once_with("foo_id")
        # Client failures surface as NetworkWrapperException.
        self._nc.show_network.side_effect = (
            neutron_exceptions.NeutronClientException)
        self.assertRaises(network.NetworkWrapperException,
                          self.wrapper.get_network,
                          net_id="foo_id")
        self._nc.list_networks.return_value = {"networks": [neutron_net]}
        net = self.wrapper.get_network(name="foo_name")
        self.assertEqual(expected_net, net)
        self._nc.list_networks.assert_called_once_with(name="foo_name")
        # An empty listing for the given name also raises.
        self._nc.list_networks.return_value = {"networks": []}
        self.assertRaises(network.NetworkWrapperException,
                          self.wrapper.get_network,
                          name="foo_name")
    def test_create_v1_pool(self):
        # LBaaS v1 pools go through self.wrapper.client (not the neutron
        # service shortcut) with fixed lb_method/protocol defaults.
        subnet = "subnet_id"
        tenant = "foo_tenant"
        expected_pool = {"pool": {
            "id": "pool_id",
            "name": self.owner.generate_random_name.return_value,
            "subnet_id": subnet,
            "tenant_id": tenant}}
        self.wrapper.client.create_pool.return_value = expected_pool
        resultant_pool = self.wrapper.create_v1_pool(tenant, subnet)
        self.wrapper.client.create_pool.assert_called_once_with({
            "pool": {"lb_method": "ROUND_ROBIN",
                     "subnet_id": subnet,
                     "tenant_id": tenant,
                     "protocol": "HTTP",
                     "name": self.owner.generate_random_name.return_value}})
        self.assertEqual(expected_pool, resultant_pool)
    def test_create_network(self):
        # Plain create_network (no router, no subnets) should return the
        # wrapper-shaped dict with defaults for external/router_id/subnets.
        self._nc.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": self.owner.generate_random_name.return_value,
                        "status": "foo_status"}}
        net = self.wrapper.create_network("foo_tenant")
        self._nc.create_network.assert_called_once_with({
            "network": {"tenant_id": "foo_tenant",
                        "name": self.owner.generate_random_name.return_value}})
        self.assertEqual({"id": "foo_id",
                          "name": self.owner.generate_random_name.return_value,
                          "status": "foo_status",
                          "external": False,
                          "tenant_id": "foo_tenant",
                          "router_id": None,
                          "subnets": []}, net)
    def test_create_network_with_subnets(self):
        # subnets_num subnets are created, each with the default Google DNS
        # nameservers, and their ids collected on the returned network.
        subnets_num = 4
        subnets_ids = iter(range(subnets_num))
        self._nc.create_subnet.side_effect = lambda i: {
            "subnet": {"id": "subnet-%d" % next(subnets_ids)}}
        self._nc.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": self.owner.generate_random_name.return_value,
                        "status": "foo_status"}}
        net = self.wrapper.create_network("foo_tenant",
                                          subnets_num=subnets_num)
        self._nc.create_network.assert_called_once_with({
            "network": {"tenant_id": "foo_tenant",
                        "name": self.owner.generate_random_name.return_value}})
        self.assertEqual({"id": "foo_id",
                          "name": self.owner.generate_random_name.return_value,
                          "status": "foo_status",
                          "external": False,
                          "router_id": None,
                          "tenant_id": "foo_tenant",
                          "subnets": ["subnet-%d" % i
                                      for i in range(subnets_num)]}, net)
        # NOTE(review): create_subnet is stubbed on self._nc but asserted
        # via self.wrapper.client -- presumably both resolve to the same
        # mock; confirm before relying on this check.
        self.assertEqual(
            [mock.call({"subnet":
                        {"name": self.owner.generate_random_name.return_value,
                         "network_id": "foo_id",
                         "tenant_id": "foo_tenant",
                         "ip_version": self.wrapper.SUBNET_IP_VERSION,
                         "dns_nameservers": ["8.8.8.8", "8.8.4.4"],
                         "cidr": mock.ANY}})
             for i in range(subnets_num)],
            self.wrapper.client.create_subnet.call_args_list
        )
    def test_create_network_with_router(self):
        # add_router=True attaches a freshly created router; its id must
        # appear in the returned network dict.
        self._nc.create_router.return_value = {"router": {"id": "foo_router"}}
        self._nc.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": self.owner.generate_random_name.return_value,
                        "status": "foo_status"}}
        net = self.wrapper.create_network("foo_tenant", add_router=True)
        self.assertEqual({"id": "foo_id",
                          "name": self.owner.generate_random_name.return_value,
                          "status": "foo_status",
                          "external": False,
                          "tenant_id": "foo_tenant",
                          "router_id": "foo_router",
                          "subnets": []}, net)
        self._nc.create_router.assert_called_once_with({
            "router": {
                "name": self.owner.generate_random_name(),
                "tenant_id": "foo_tenant"
            }
        })
    def test_create_network_with_router_and_subnets(self):
        # Combined case: router plus subnets_num subnets with custom DNS
        # nameservers; each subnet must also be attached to the router.
        subnets_num = 4
        self.wrapper._generate_cidr = mock.Mock(return_value="foo_cidr")
        self._nc.create_router.return_value = {"router": {"id": "foo_router"}}
        self._nc.create_subnet.return_value = {"subnet": {"id": "foo_subnet"}}
        self._nc.create_network.return_value = {
            "network": {"id": "foo_id",
                        "name": self.owner.generate_random_name.return_value,
                        "status": "foo_status"}}
        net = self.wrapper.create_network(
            "foo_tenant", add_router=True, subnets_num=subnets_num,
            dns_nameservers=["foo_nameservers"])
        self.assertEqual({"id": "foo_id",
                          "name": self.owner.generate_random_name.return_value,
                          "status": "foo_status",
                          "external": False,
                          "tenant_id": "foo_tenant",
                          "router_id": "foo_router",
                          "subnets": ["foo_subnet"] * subnets_num}, net)
        self._nc.create_router.assert_called_once_with(
            {"router": {"name": self.owner.generate_random_name.return_value,
                        "tenant_id": "foo_tenant"}})
        self.assertEqual(
            [
                mock.call(
                    {"subnet": {
                        "name": self.owner.generate_random_name.return_value,
                        "network_id": "foo_id",
                        "tenant_id": "foo_tenant",
                        "ip_version": self.wrapper.SUBNET_IP_VERSION,
                        "dns_nameservers": ["foo_nameservers"],
                        "cidr": mock.ANY
                    }}
                )
            ] * subnets_num,
            self._nc.create_subnet.call_args_list,
        )
        # One router interface per created subnet.
        self.assertEqual(self._nc.add_interface_router.call_args_list,
                         [mock.call("foo_router", {"subnet_id": "foo_subnet"})
                          for i in range(subnets_num)])
    def test_delete_v1_pool(self):
        # LBaaS v1 pool deletion goes through self.wrapper.client by id.
        pool = {"pool": {"id": "pool-id"}}
        self.wrapper.delete_v1_pool(pool["pool"]["id"])
        self.wrapper.client.delete_pool.assert_called_once_with("pool-id")
def test_delete_network(self):
self._nc.list_ports.return_value = {"ports": []}
self._nc.list_subnets.return_value = {"subnets": []}
self._nc.delete_network.return_value = "foo_deleted"
self.wrapper.delete_network(
{"id": "foo_id", "router_id": None, "subnets": [], "name": "x",
"status": "y", "external": False})
self.assertFalse(self._nc.remove_gateway_router.called)
self.assertFalse(self._nc.remove_interface_router.called)
self.assertFalse(self._nc.client.delete_router.called)
self.assertFalse(self._nc.client.delete_subnet.called)
self._nc.delete_network.assert_called_once_with("foo_id")
    def test_delete_network_with_router_and_ports_and_subnets(self):
        # Full teardown: gateway removed from the router, router-interface
        # ports detached, other ports deleted directly, then subnets and
        # finally the network itself.
        subnets = ["foo_subnet", "bar_subnet"]
        ports = [{"id": "foo_port", "device_owner": "network:router_interface",
                  "device_id": "rounttter"},
                 {"id": "bar_port", "device_owner": "network:dhcp"}]
        self._nc.list_ports.return_value = ({"ports": ports})
        self._nc.list_subnets.return_value = (
            {"subnets": [{"id": id_} for id_ in subnets]})
        self.wrapper.delete_network(
            {"id": "foo_id", "router_id": "foo_router", "subnets": subnets,
             "lb_pools": [], "name": "foo", "status": "x", "external": False})
        self.assertEqual(self._nc.remove_gateway_router.mock_calls,
                         [mock.call("foo_router")])
        # Non-router port is deleted; router-interface port is detached.
        self._nc.delete_port.assert_called_once_with(ports[1]["id"])
        self._nc.remove_interface_router.assert_called_once_with(
            ports[0]["device_id"], {"port_id": ports[0]["id"]})
        self.assertEqual(
            [mock.call(subnet_id) for subnet_id in subnets],
            self._nc.delete_subnet.call_args_list
        )
        self._nc.delete_network.assert_called_once_with("foo_id")
    @ddt.data({"exception_type": neutron_exceptions.NotFound,
               "should_raise": False},
              {"exception_type": neutron_exceptions.BadRequest,
               "should_raise": False},
              {"exception_type": KeyError,
               "should_raise": True})
    @ddt.unpack
    def test_delete_network_with_router_throw_exception(
            self, exception_type, should_raise):
        # Ensure cleanup context still move forward even
        # remove_interface_router throw NotFound/BadRequest exception
        # (unexpected exception types such as KeyError must propagate and
        # abort the teardown before subnets/network are deleted).
        self._nc.remove_interface_router.side_effect = exception_type
        subnets = ["foo_subnet", "bar_subnet"]
        ports = [{"id": "foo_port", "device_owner": "network:router_interface",
                  "device_id": "rounttter"},
                 {"id": "bar_port", "device_owner": "network:dhcp"}]
        self._nc.list_ports.return_value = {"ports": ports}
        self._nc.list_subnets.return_value = {"subnets": [
            {"id": id_} for id_ in subnets]}
        if should_raise:
            self.assertRaises(
                exception_type, self.wrapper.delete_network,
                {"id": "foo_id", "name": "foo", "router_id": "foo_router",
                 "subnets": subnets, "lb_pools": [], "status": "xxx",
                 "external": False})
            self.assertFalse(self._nc.delete_subnet.called)
            self.assertFalse(self._nc.delete_network.called)
        else:
            self.wrapper.delete_network(
                {"id": "foo_id", "name": "foo", "status": "xxx",
                 "router_id": "foo_router", "subnets": subnets,
                 "lb_pools": [], "external": False})
            self._nc.delete_port.assert_called_once_with(ports[1]["id"])
            self._nc.remove_interface_router.assert_called_once_with(
                ports[0]["device_id"], {"port_id": ports[0]["id"]})
            self.assertEqual(
                [mock.call(subnet_id) for subnet_id in subnets],
                self._nc.delete_subnet.call_args_list
            )
            self._nc.delete_network.assert_called_once_with("foo_id")
            self._nc.remove_gateway_router.assert_called_once_with(
                "foo_router")
    def test_list_networks(self):
        # Thin pass-through: returns the "networks" list unmodified.
        self._nc.list_networks.return_value = {"networks": "foo_nets"}
        self.assertEqual("foo_nets", self.wrapper.list_networks())
        self._nc.list_networks.assert_called_once_with()
    def test_create_floating_ip(self):
        # Exercises argument validation (tenant_id required), the "no
        # external networks" error, the default external network choice,
        # selection by name, and an unknown external network name.
        self._nc.create_port.return_value = {"port": {"id": "port_id"}}
        self._nc.create_floatingip.return_value = {
            "floatingip": {"id": "fip_id", "floating_ip_address": "fip_ip"}}
        self.assertRaises(ValueError, self.wrapper.create_floating_ip)
        self._nc.list_networks.return_value = {"networks": []}
        self.assertRaises(network.NetworkWrapperException,
                          self.wrapper.create_floating_ip,
                          tenant_id="foo_tenant")
        self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
        fip = self.wrapper.create_floating_ip(
            tenant_id="foo_tenant", port_id="port_id")
        # The wrapper returns a simplified {"id", "ip"} dict.
        self.assertEqual({"id": "fip_id", "ip": "fip_ip"}, fip)
        self._nc.list_networks.return_value = {"networks": [
            {"id": "ext_net_id", "name": "ext_net", "router:external": True}]}
        self.wrapper.create_floating_ip(
            tenant_id="foo_tenant", ext_network="ext_net", port_id="port_id")
        self.assertRaises(
            network.NetworkWrapperException,
            self.wrapper.create_floating_ip, tenant_id="foo_tenant",
            ext_network="ext_net_2")
def test_delete_floating_ip(self):
self.wrapper.delete_floating_ip("fip_id")
self.wrapper.delete_floating_ip("fip_id", ignored_kwarg="bar")
self.assertEqual([mock.call("fip_id")] * 2,
self._nc.delete_floatingip.call_args_list)
    def test_create_router(self):
        """create_router() with and without an external gateway.

        The ext-gw-mode extension is advertised, so the external gateway
        info is expected to carry an explicit "enable_snat" flag.
        """
        self._nc.create_router.return_value = {"router": "foo_router"}
        self._nc.list_extensions.return_value = {
            "extensions": [{"alias": "ext-gw-mode"}]}
        self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
        # default call: only the generated name is sent
        router = self.wrapper.create_router()
        self._nc.create_router.assert_called_once_with(
            {"router": {"name": self.owner.generate_random_name.return_value}})
        self.assertEqual("foo_router", router)
        # external=True: gateway info (with enable_snat) and extra kwargs pass
        self.wrapper.create_router(external=True, flavor_id="bar")
        self._nc.create_router.assert_called_with(
            {"router": {"name": self.owner.generate_random_name.return_value,
                        "external_gateway_info": {
                            "network_id": "ext_id",
                            "enable_snat": True},
                        "flavor_id": "bar"}})
    def test_create_router_without_ext_gw_mode_extension(self):
        """Without ext-gw-mode, "enable_snat" must be omitted from the body."""
        self._nc.create_router.return_value = {"router": "foo_router"}
        self._nc.list_extensions.return_value = {"extensions": []}
        self._nc.list_networks.return_value = {"networks": [{"id": "ext_id"}]}
        router = self.wrapper.create_router()
        self._nc.create_router.assert_called_once_with(
            {"router": {"name": self.owner.generate_random_name.return_value}})
        self.assertEqual(router, "foo_router")
        # gateway info contains only the network id, no enable_snat key
        self.wrapper.create_router(external=True, flavor_id="bar")
        self._nc.create_router.assert_called_with(
            {"router": {"name": self.owner.generate_random_name.return_value,
                        "external_gateway_info": {"network_id": "ext_id"},
                        "flavor_id": "bar"}})
    def test_create_port(self):
        """create_port() builds the port body; extra kwargs are merged in."""
        self._nc.create_port.return_value = {"port": "foo_port"}
        port = self.wrapper.create_port("foo_net")
        self._nc.create_port.assert_called_once_with(
            {"port": {"network_id": "foo_net",
                      "name": self.owner.generate_random_name.return_value}})
        self.assertEqual("foo_port", port)
        port = self.wrapper.create_port("foo_net", foo="bar")
        # NOTE(review): asserts via self.wrapper.client here rather than
        # self._nc as the rest of the file does — presumably the same mock;
        # confirm against the fixture setup.
        self.wrapper.client.create_port.assert_called_with(
            {"port": {"network_id": "foo_net",
                      "name": self.owner.generate_random_name.return_value,
                      "foo": "bar"}})
    def test_supports_extension(self):
        """supports_extension() honours the extension list and its cache.

        The cached-extensions attribute is reset between assertions so each
        lookup re-reads list_extensions().
        """
        self._nc.list_extensions.return_value = (
            {"extensions": [{"alias": "extension"}]})
        self.assertTrue(self.wrapper.supports_extension("extension")[0])
        # drop the cache so the next call consults the client again
        self.wrapper.neutron._cached_supported_extensions = None
        self._nc.list_extensions.return_value = (
            {"extensions": [{"alias": "extension"}]})
        self.assertFalse(self.wrapper.supports_extension("dummy-group")[0])
        self.wrapper.neutron._cached_supported_extensions = None
        self._nc.list_extensions.return_value = {"extensions": []}
        self.assertFalse(self.wrapper.supports_extension("extension")[0])
class FunctionsTestCase(test.TestCase):
    """Tests for module-level helpers of the network wrapper module."""

    def test_wrap(self):
        """network.wrap() returns a NeutronWrapper bound to owner/config."""
        mock_clients = mock.Mock()
        config = {"fakearg": "fake"}
        owner = Owner()
        mock_clients.services.return_value = {"foo": consts.Service.NEUTRON}
        wrapper = network.wrap(mock_clients, owner, config)
        self.assertIsInstance(wrapper, network.NeutronWrapper)
        # assertEqual(expected, actual) — matches the convention used by the
        # rest of this file (the original had the arguments reversed).
        self.assertEqual(owner, wrapper.owner)
        self.assertEqual(config, wrapper.config)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,715
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/watcher/basic.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import types
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.watcher import utils
"""Scenarios for Watcher servers."""
@types.convert(strategy={"type": "watcher_strategy"},
               goal={"type": "watcher_goal"})
@validation.add("required_services", services=[consts.Service.WATCHER])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["watcher"]},
                    name="Watcher.create_audit_template_and_delete",
                    platform="openstack")
class CreateAuditTemplateAndDelete(utils.WatcherScenario):
    # Creates an audit template for the given goal/strategy and deletes it
    # again (by uuid) in the same iteration.
    @logging.log_deprecated_args("Extra field has been removed "
                                 "since it isn't used.", "0.8.0", ["extra"],
                                 once=True)
    def run(self, goal, strategy):
        """Create audit template and delete it.

        :param goal: The goal audit template is based on
        :param strategy: The strategy used to provide resource optimization
                         algorithm
        """
        audit_template = self._create_audit_template(goal, strategy)
        self._delete_audit_template(audit_template.uuid)
@validation.add("required_services", services=[consts.Service.WATCHER])
@scenario.configure(name="Watcher.list_audit_templates", platform="openstack")
class ListAuditTemplates(utils.WatcherScenario):
    # Read-only scenario: lists templates with optional filter/sort/paging.
    def run(self, name=None, goal=None, strategy=None,
            limit=None, sort_key=None, sort_dir=None,
            detail=False):
        """List existing audit templates.

        Audit templates are being created by Audit Template Context.

        :param name: Name of the audit template
        :param goal: Name of the goal
        :param strategy: Name of the strategy
        :param limit: The maximum number of results to return per
                      request, if:

            1) limit > 0, the maximum number of audit templates to return.
            2) limit == 0, return the entire list of audit_templates.
            3) limit param is NOT specified (None), the number of items
               returned respect the maximum imposed by the Watcher API
               (see Watcher's api.max_limit option).
        :param sort_key: Optional, field used for sorting.
        :param sort_dir: Optional, direction of sorting, either 'asc' (the
                         default) or 'desc'.
        :param detail: Optional, boolean whether to return detailed information
                       about audit_templates.
        """
        self._list_audit_templates(name=name, goal=goal, strategy=strategy,
                                   limit=limit, sort_key=sort_key,
                                   sort_dir=sort_dir, detail=detail)
@validation.add("required_services", services=[consts.Service.WATCHER])
@validation.add("required_contexts", contexts="audit_templates")
@scenario.configure(context={"admin_cleanup@openstack": ["watcher"]},
                    name="Watcher.create_audit_and_delete",
                    platform="openstack")
class CreateAuditAndDelete(utils.WatcherScenario):
    # Requires the audit_templates context; uses its first template uuid.
    def run(self):
        """Create and delete audit.

        Create Audit, wait until whether Audit is in SUCCEEDED state or in
        FAILED and delete audit.
        """
        audit_template_uuid = self.context["audit_templates"][0]
        audit = self._create_audit(audit_template_uuid)
        self._delete_audit(audit)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,716
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/neutron/test_loadbalancer_v1.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.task.scenarios.neutron import loadbalancer_v1
from tests.unit import test
@ddt.ddt
class NeutronLoadbalancerv1TestCase(test.TestCase):
    """Tests for the neutron LBaaS v1 scenarios (pools, VIPs, monitors)."""
    def _get_context(self):
        """Build a minimal task context: one user and one tenant network."""
        context = test.get_test_context()
        context.update({
            "user": {
                "id": "fake_user",
                "tenant_id": "fake_tenant",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant",
                       "networks": [{"id": "fake_net",
                                     "subnets": ["fake_subnet"]}]}})
        return context
    @ddt.data(
        {},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test_create_and_list_pools(self, pool_create_args=None):
        """Pools are created per tenant network, then listed once."""
        scenario = loadbalancer_v1.CreateAndListPools(self._get_context())
        pool_data = pool_create_args or {}
        networks = self._get_context()["tenant"]["networks"]
        scenario._create_v1_pools = mock.Mock()
        scenario._list_v1_pools = mock.Mock()
        scenario.run(pool_create_args=pool_create_args)
        scenario._create_v1_pools.assert_called_once_with(networks,
                                                          **pool_data)
        scenario._list_v1_pools.assert_called_once_with()
    @ddt.data(
        {},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test_create_and_delete_pools(self, pool_create_args=None):
        """Each created pool is deleted once."""
        scenario = loadbalancer_v1.CreateAndDeletePools(self._get_context())
        pools = [{
            "pool": {
                "id": "pool-id"
            }
        }]
        pool_data = pool_create_args or {}
        networks = self._get_context()["tenant"]["networks"]
        scenario._create_v1_pools = mock.Mock(return_value=pools)
        scenario._delete_v1_pool = mock.Mock()
        scenario.run(pool_create_args=pool_create_args)
        self.assertEqual([mock.call(networks, **pool_data)],
                         scenario._create_v1_pools.mock_calls)
        for _ in pools:
            self.assertEqual(1, scenario._delete_v1_pool.call_count)
    @ddt.data(
        {},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-name"}},
        {"pool_update_args": None},
        {"pool_update_args": {}},
        {"pool_update_args": {"name": "updated-name"}},
        {"pool_create_args": None, "pool_update_args": None},
        {"pool_create_args": {"name": "given-name"},
         "pool_update_args": {"name": "updated-name"}},
        {"pool_create_args": None,
         "pool_update_args": {"name": "updated-name"}},
        {"pool_create_args": None, "pool_update_args": {}},
        {"pool_create_args": {}, "pool_update_args": None},
    )
    @ddt.unpack
    def test_create_and_update_pools(self, pool_create_args=None,
                                     pool_update_args=None):
        """Pools are created, then each updated with the merged update args."""
        scenario = loadbalancer_v1.CreateAndUpdatePools(self._get_context())
        pools = [{
            "pool": {
                "id": "pool-id"
            }
        }]
        updated_pool = {
            "pool": {
                "id": "pool-id",
                "name": "updated-pool",
                "admin_state_up": True
            }
        }
        pool_data = pool_create_args or {}
        pool_update_args = pool_update_args or {}
        # name/admin_state_up are forced into the update payload
        pool_update_args.update({"name": "_updated", "admin_state_up": True})
        scenario._create_v1_pools = mock.Mock(return_value=pools)
        scenario._update_v1_pool = mock.Mock(return_value=updated_pool)
        networks = self._get_context()["tenant"]["networks"]
        scenario.run(pool_create_args=pool_data,
                     pool_update_args=pool_update_args)
        self.assertEqual([mock.call(networks, **pool_data)],
                         scenario._create_v1_pools.mock_calls)
        for pool in pools:
            scenario._update_v1_pool.assert_called_once_with(
                pool, **pool_update_args)
    @ddt.data(
        {},
        {"vip_create_args": None},
        {"vip_create_args": {}},
        {"vip_create_args": {"name": "given-vip-name"}},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-pool-name"}},
    )
    @ddt.unpack
    def test_create_and_list_vips(self, pool_create_args=None,
                                  vip_create_args=None):
        """A VIP is created per pool, then all VIPs are listed once."""
        scenario = loadbalancer_v1.CreateAndListVips(self._get_context())
        pools = [{
            "pool": {
                "id": "pool-id"
            }
        }]
        vip_data = vip_create_args or {}
        pool_data = pool_create_args or {}
        networks = self._get_context()["tenant"]["networks"]
        scenario._create_v1_pools = mock.Mock(return_value=pools)
        scenario._create_v1_vip = mock.Mock()
        scenario._list_v1_vips = mock.Mock()
        scenario.run(pool_create_args=pool_create_args,
                     vip_create_args=vip_create_args)
        scenario._create_v1_pools.assert_called_once_with(networks,
                                                          **pool_data)
        scenario._create_v1_vip.assert_has_calls(
            [mock.call(pool, **vip_data) for pool in pools])
        scenario._list_v1_vips.assert_called_once_with()
    @ddt.data(
        {},
        {"vip_create_args": None},
        {"vip_create_args": {}},
        {"vip_create_args": {"name": "given-name"}},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-pool-name"}},
    )
    @ddt.unpack
    def test_create_and_delete_vips(self, pool_create_args=None,
                                    vip_create_args=None):
        """Each created VIP is deleted (unwrapped from the "vip" key)."""
        scenario = loadbalancer_v1.CreateAndDeleteVips(self._get_context())
        pools = [{
            "pool": {
                "id": "pool-id"
            }
        }]
        vip = {
            "vip": {
                "id": "vip-id"
            }
        }
        vip_data = vip_create_args or {}
        pool_data = pool_create_args or {}
        networks = self._get_context()["tenant"]["networks"]
        scenario._create_v1_pools = mock.Mock(return_value=pools)
        scenario._create_v1_vip = mock.Mock(return_value=vip)
        scenario._delete_v1_vip = mock.Mock()
        scenario.run(pool_create_args=pool_create_args,
                     vip_create_args=vip_create_args)
        scenario._create_v1_pools.assert_called_once_with(networks,
                                                          **pool_data)
        scenario._create_v1_vip.assert_has_calls(
            [mock.call(pool, **vip_data) for pool in pools])
        scenario._delete_v1_vip.assert_has_calls([mock.call(vip["vip"])])
    @ddt.data(
        {},
        {"vip_create_args": None},
        {"vip_create_args": {}},
        {"vip_create_args": {"name": "given-vip-name"}},
        {"pool_create_args": None},
        {"pool_create_args": {}},
        {"pool_create_args": {"name": "given-pool-name"}},
    )
    @ddt.unpack
    def test_create_and_update_vips(self, pool_create_args=None,
                                    vip_create_args=None,
                                    vip_update_args=None):
        """Each created VIP is updated with the given update args."""
        scenario = loadbalancer_v1.CreateAndUpdateVips(self._get_context())
        pools = [{
            "pool": {
                "id": "pool-id",
            }
        }]
        expected_vip = {
            "vip": {
                "id": "vip-id",
                "name": "vip-name"
            }
        }
        updated_vip = {
            "vip": {
                "id": "vip-id",
                "name": "updated-vip-name"
            }
        }
        vips = [expected_vip]
        vip_data = vip_create_args or {}
        vip_update_data = vip_update_args or {}
        pool_data = pool_create_args or {}
        networks = self._get_context()["tenant"]["networks"]
        scenario._create_v1_pools = mock.Mock(return_value=pools)
        scenario._create_v1_vip = mock.Mock(return_value=expected_vip)
        scenario._update_v1_vip = mock.Mock(return_value=updated_vip)
        scenario.run(pool_create_args=pool_create_args,
                     vip_create_args=vip_create_args,
                     vip_update_args=vip_update_args)
        scenario._create_v1_pools.assert_called_once_with(networks,
                                                          **pool_data)
        scenario._create_v1_vip.assert_has_calls(
            [mock.call(pool, **vip_data) for pool in pools])
        scenario._update_v1_vip.assert_has_calls(
            [mock.call(vip, **vip_update_data) for vip in vips])
    @ddt.data(
        {},
        {"healthmonitor_create_args": None},
        {"healthmonitor_create_args": {}},
        {"healthmonitor_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test_create_and_list_healthmonitors(self,
                                            healthmonitor_create_args=None):
        """A health monitor is created once, then all monitors are listed."""
        scenario = loadbalancer_v1.CreateAndListHealthmonitors(
            self._get_context())
        hm_data = healthmonitor_create_args or {}
        scenario._create_v1_healthmonitor = mock.Mock()
        scenario._list_v1_healthmonitors = mock.Mock()
        scenario.run(healthmonitor_create_args=healthmonitor_create_args)
        scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)
        scenario._list_v1_healthmonitors.assert_called_once_with()
    @ddt.data(
        {},
        {"healthmonitor_create_args": None},
        {"healthmonitor_create_args": {}},
        {"healthmonitor_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test_create_and_delete_healthmonitors(self,
                                              healthmonitor_create_args=None):
        """The created health monitor is deleted (unwrapped payload)."""
        scenario = loadbalancer_v1.CreateAndDeleteHealthmonitors(
            self._get_context())
        hm = {"health_monitor": {"id": "hm-id"}}
        hm_data = healthmonitor_create_args or {}
        scenario._create_v1_healthmonitor = mock.Mock(return_value=hm)
        scenario._delete_v1_healthmonitor = mock.Mock()
        scenario.run(healthmonitor_create_args=healthmonitor_create_args)
        scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)
        scenario._delete_v1_healthmonitor.assert_called_once_with(
            scenario._create_v1_healthmonitor.return_value["health_monitor"])
    @ddt.data(
        {},
        {"healthmonitor_create_args": None},
        {"healthmonitor_create_args": {}},
        {"healthmonitor_create_args": {"name": "given-name"}},
    )
    @ddt.unpack
    def test_create_and_update_healthmonitors(self,
                                              healthmonitor_create_args=None,
                                              healthmonitor_update_args=None):
        """The created health monitor is updated with the expected args."""
        scenario = loadbalancer_v1.CreateAndUpdateHealthmonitors(
            self._get_context())
        # NOTE(review): this rebinds loadbalancer_v1.random to a Mock without
        # restoring it afterwards — presumably intentional for determinism,
        # but it leaks into subsequent tests; consider mock.patch instead.
        mock_random = loadbalancer_v1.random = mock.Mock()
        hm = {"healthmonitor": {"id": "hm-id"}}
        hm_data = healthmonitor_create_args or {}
        hm_update_data = healthmonitor_update_args or {
            "max_retries": mock_random.choice.return_value}
        scenario._create_v1_healthmonitor = mock.Mock(return_value=hm)
        scenario._update_v1_healthmonitor = mock.Mock()
        scenario.run(healthmonitor_create_args=healthmonitor_create_args,
                     healthmonitor_update_args=healthmonitor_update_args)
        scenario._create_v1_healthmonitor.assert_called_once_with(**hm_data)
        scenario._update_v1_healthmonitor.assert_called_once_with(
            scenario._create_v1_healthmonitor.return_value, **hm_update_data)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,717
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/keystone/test_roles.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.task.contexts.keystone import roles
from tests.unit import fakes
from tests.unit import test
# Module path of the context under test; used to build mock.patch targets.
CTX = "rally_openstack.task.contexts.keystone.roles"
class RoleGeneratorTestCase(test.TestCase):
    """Tests for the keystone roles context (grant/revoke of user roles)."""
    def create_default_roles_and_patch_add_remove_functions(self, fc):
        """Seed two roles into the fake keystone and mock the grant calls."""
        fc.keystone().roles.add_user_role = mock.MagicMock()
        fc.keystone().roles.remove_user_role = mock.MagicMock()
        fc.keystone().roles.create("r1", "test_role1")
        fc.keystone().roles.create("r2", "test_role2")
        self.assertEqual(2, len(fc.keystone().roles.list()))
    @property
    def context(self):
        # Fresh context dict per access: two configured roles, admin creds.
        return {
            "config": {
                "roles": [
                    "test_role1",
                    "test_role2"
                ]
            },
            "admin": {"credential": mock.MagicMock()},
            "task": mock.MagicMock()
        }
    @mock.patch("%s.osclients" % CTX)
    def test_add_role(self, mock_osclients):
        """setup() records the id->name map of the roles it assigned."""
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        ctx.credential = mock.MagicMock()
        ctx.setup()
        expected = {"r1": "test_role1", "r2": "test_role2"}
        self.assertEqual(expected, ctx.context["roles"])
    @mock.patch("%s.osclients" % CTX)
    def test_add_role_which_does_not_exist(self, mock_osclients):
        """Looking up an unknown role name raises NotFoundException."""
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                {"id": "u2", "tenant_id": "t2"}]
        ctx.config = ["unknown_role"]
        ctx.credential = mock.MagicMock()
        ex = self.assertRaises(exceptions.NotFoundException,
                               ctx._get_role_object, "unknown_role")
        expected = ("The resource can not be found: There is no role "
                    "with name `unknown_role`")
        self.assertEqual(expected, str(ex))
    @mock.patch("%s.osclients" % CTX)
    def test_remove_role(self, mock_osclients):
        """cleanup() revokes every assigned role from every user."""
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        ctx = roles.RoleGenerator(self.context)
        ctx.context["roles"] = {"r1": "test_role1",
                                "r2": "test_role2"}
        ctx.context["users"] = [{"id": "u1", "tenant_id": "t1",
                                 "assigned_roles": ["r1", "r2"]},
                                {"id": "u2", "tenant_id": "t2",
                                 "assigned_roles": ["r1", "r2"]}]
        ctx.credential = mock.MagicMock()
        ctx.cleanup()
        calls = [
            mock.call(user="u1", role="r1", tenant="t1"),
            mock.call(user="u2", role="r1", tenant="t2"),
            mock.call(user="u1", role="r2", tenant="t1"),
            mock.call(user="u2", role="r2", tenant="t2")
        ]
        fc.keystone().roles.remove_user_role.assert_has_calls(calls,
                                                              any_order=True)
    @mock.patch("%s.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients):
        """Full lifecycle: grants on setup, matching revokes on cleanup.

        User u3 already has both roles (via the side effect below), so no
        grant is issued for it — only u1/u2 get the four add calls.
        """
        fc = fakes.FakeClients()
        mock_osclients.Clients.return_value = fc
        self.create_default_roles_and_patch_add_remove_functions(fc)
        def _get_user_role_ids_side_effect(user_id, project_id):
            # u3 is pre-assigned both roles; everyone else has none
            return ["r1", "r2"] if user_id == "u3" else []
        with roles.RoleGenerator(self.context) as ctx:
            ctx.context["users"] = [{"id": "u1", "tenant_id": "t1"},
                                    {"id": "u2", "tenant_id": "t2"},
                                    {"id": "u3", "tenant_id": "t3"}]
            ctx._get_user_role_ids = mock.MagicMock()
            ctx._get_user_role_ids.side_effect = _get_user_role_ids_side_effect
            ctx.setup()
            ctx.credential = mock.MagicMock()
            calls = [
                mock.call(user="u1", role="r1", tenant="t1"),
                mock.call(user="u2", role="r1", tenant="t2"),
                mock.call(user="u1", role="r2", tenant="t1"),
                mock.call(user="u2", role="r2", tenant="t2"),
            ]
            fc.keystone().roles.add_user_role.assert_has_calls(calls,
                                                               any_order=True)
            self.assertEqual(
                4, fc.keystone().roles.add_user_role.call_count)
            self.assertEqual(
                0, fc.keystone().roles.remove_user_role.call_count)
            self.assertEqual(2, len(ctx.context["roles"]))
            self.assertEqual(2, len(fc.keystone().roles.list()))
        # Cleanup (called by context manager)
        self.assertEqual(2, len(fc.keystone().roles.list()))
        self.assertEqual(4, fc.keystone().roles.add_user_role.call_count)
        self.assertEqual(4, fc.keystone().roles.remove_user_role.call_count)
        calls = [
            mock.call(user="u1", role="r1", tenant="t1"),
            mock.call(user="u2", role="r1", tenant="t2"),
            mock.call(user="u1", role="r2", tenant="t1"),
            mock.call(user="u2", role="r2", tenant="t2")
        ]
        fc.keystone().roles.remove_user_role.assert_has_calls(calls,
                                                              any_order=True)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,718
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/nova/test_servers.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally import exceptions as rally_exceptions
from rally_openstack.task.scenarios.nova import servers
from tests.unit import fakes
from tests.unit import test
# Dotted paths of the module/class under test, used to build patch targets.
NOVA_SERVERS_MODULE = "rally_openstack.task.scenarios.nova.servers"
NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers"
@ddt.ddt
class NovaServersTestCase(test.ScenarioTestCase):
    @ddt.data(("rescue_unrescue", ["_rescue_server", "_unrescue_server"], 1),
              ("stop_start", ["_stop_server", "_start_server"], 2),
              ("pause_unpause", ["_pause_server", "_unpause_server"], 3),
              ("suspend_resume", ["_suspend_server", "_resume_server"], 4),
              ("lock_unlock", ["_lock_server", "_unlock_server"], 5),
              ("shelve_unshelve", ["_shelve_server", "_unshelve_server"], 6))
    @ddt.unpack
    def test_action_pair(self, action_pair, methods, nof_calls):
        """Each paired action invokes both halves nof_calls times."""
        actions = [{action_pair: nof_calls}]
        fake_server = mock.MagicMock()
        scenario = servers.BootAndBounceServer(self.context)
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        for method in methods:
            setattr(scenario, method, mock.MagicMock())
        scenario.run("img", 1, actions=actions)
        scenario._boot_server.assert_called_once_with("img", 1)
        server_calls = []
        for i in range(nof_calls):
            server_calls.append(mock.call(fake_server))
        for method in methods:
            mocked_method = getattr(scenario, method)
            self.assertEqual(nof_calls, mocked_method.call_count,
                             "%s not called %d times" % (method, nof_calls))
            mocked_method.assert_has_calls(server_calls)
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    def test_multiple_bounce_actions(self):
        """All seven bounce actions run their configured number of times."""
        actions = [{"hard_reboot": 5}, {"stop_start": 8},
                   {"rescue_unrescue": 3}, {"pause_unpause": 2},
                   {"suspend_resume": 4}, {"lock_unlock": 6},
                   {"shelve_unshelve": 7}]
        fake_server = mock.MagicMock()
        scenario = servers.BootAndBounceServer(self.context)
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario._reboot_server = mock.MagicMock()
        scenario._stop_and_start_server = mock.MagicMock()
        scenario._rescue_and_unrescue_server = mock.MagicMock()
        scenario._pause_and_unpause_server = mock.MagicMock()
        scenario._suspend_and_resume_server = mock.MagicMock()
        scenario._lock_and_unlock_server = mock.MagicMock()
        scenario._shelve_and_unshelve_server = mock.MagicMock()
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run("img", 1, actions=actions)
        scenario._boot_server.assert_called_once_with("img", 1)
        # one assertion group per action type, matching the counts above
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(5, scenario._reboot_server.call_count,
                         "Reboot not called 5 times")
        scenario._reboot_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(8):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(8, scenario._stop_and_start_server.call_count,
                         "Stop/Start not called 8 times")
        scenario._stop_and_start_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(3):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(3, scenario._rescue_and_unrescue_server.call_count,
                         "Rescue/Unrescue not called 3 times")
        scenario._rescue_and_unrescue_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(2):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(2, scenario._pause_and_unpause_server.call_count,
                         "Pause/Unpause not called 2 times")
        scenario._pause_and_unpause_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(4):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(4, scenario._suspend_and_resume_server.call_count,
                         "Suspend/Resume not called 4 times")
        scenario._suspend_and_resume_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(6):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(6, scenario._lock_and_unlock_server.call_count,
                         "Lock/Unlock not called 6 times")
        scenario._lock_and_unlock_server.assert_has_calls(server_calls)
        server_calls = []
        for i in range(7):
            server_calls.append(mock.call(fake_server))
        self.assertEqual(7, scenario._shelve_and_unshelve_server.call_count,
                         "Shelve/Unshelve not called 7 times")
        scenario._shelve_and_unshelve_server.assert_has_calls(server_calls)
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    def test_boot_lock_unlock_and_delete(self):
        """Server is unlocked again before deletion.

        The _delete_server side effect asserts the server's locked flag has
        been cleared by the time delete is invoked.
        """
        server = fakes.FakeServer()
        image = fakes.FakeImage()
        flavor = fakes.FakeFlavor()
        scenario = servers.BootLockUnlockAndDelete(self.context)
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock())
        scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock())
        scenario._delete_server = mock.Mock(
            side_effect=lambda s, **kwargs:
            self.assertFalse(getattr(s, "OS-EXT-STS:locked", False)))
        scenario.run(image, flavor, fakearg="fakearg")
        scenario._boot_server.assert_called_once_with(image, flavor,
                                                      fakearg="fakearg")
        scenario._lock_server.assert_called_once_with(server)
        scenario._unlock_server.assert_called_once_with(server)
        scenario._delete_server.assert_called_once_with(server, force=False)
    @ddt.data("hard_reboot", "soft_reboot", "stop_start",
              "rescue_unrescue", "pause_unpause", "suspend_resume",
              "lock_unlock", "shelve_unshelve")
    def test_validate_actions(self, action):
        """Non-positive or non-integer action counts are rejected."""
        scenario = servers.BootAndBounceServer(self.context)
        self.assertRaises(rally_exceptions.InvalidConfigException,
                          scenario.run,
                          1, 1, actions=[{action: "no"}])
        self.assertRaises(rally_exceptions.InvalidConfigException,
                          scenario.run,
                          1, 1, actions=[{action: -1}])
        self.assertRaises(rally_exceptions.InvalidConfigException,
                          scenario.run,
                          1, 1, actions=[{action: 0}])
    def test_validate_actions_additional(self):
        """Unknown action names and wrong container types are rejected."""
        scenario = servers.BootAndBounceServer(self.context)
        self.assertRaises(rally_exceptions.InvalidConfigException,
                          scenario.run,
                          1, 1, actions=[{"not_existing_action": "no"}])
        # NOTE: next should fail because actions parameter is a just a
        # dictionary, not an array of dictionaries
        self.assertRaises(rally_exceptions.InvalidConfigException,
                          scenario.run,
                          1, 1, actions={"hard_reboot": 1})
    def _verify_reboot(self, soft=True):
        """Shared driver: assert the soft/hard reboot path runs 5 times.

        :param soft: when True check _soft_reboot_server, else _reboot_server
        """
        actions = [{"soft_reboot" if soft else "hard_reboot": 5}]
        fake_server = mock.MagicMock()
        scenario = servers.BootAndBounceServer(self.context)
        scenario._reboot_server = mock.MagicMock()
        scenario._soft_reboot_server = mock.MagicMock()
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario.run("img", 1, actions=actions)
        scenario._boot_server.assert_called_once_with("img", 1)
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
        if soft:
            self.assertEqual(5, scenario._soft_reboot_server.call_count,
                             "Reboot not called 5 times")
            scenario._soft_reboot_server.assert_has_calls(server_calls)
        else:
            self.assertEqual(5, scenario._reboot_server.call_count,
                             "Reboot not called 5 times")
            scenario._reboot_server.assert_has_calls(server_calls)
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    def test_boot_soft_reboot(self):
        # soft-reboot variant of the shared _verify_reboot driver
        self._verify_reboot(soft=True)
    def test_boot_hard_reboot(self):
        # hard-reboot variant of the shared _verify_reboot driver
        self._verify_reboot(soft=False)
def test_boot_and_delete_server(self):
fake_server = object()
scenario = servers.BootAndDeleteServer(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario.sleep_between = mock.MagicMock()
scenario.run("img", 0, 10, 20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
    def test_boot_and_delete_multiple_servers(self):
        """15 servers are booted in one batch and deleted together."""
        scenario = servers.BootAndDeleteMultipleServers(self.context)
        scenario._boot_servers = mock.Mock()
        scenario._delete_servers = mock.Mock()
        scenario.sleep_between = mock.Mock()
        scenario.run("img", "flavor", count=15, min_sleep=10,
                     max_sleep=20, fakearg="fakearg")
        scenario._boot_servers.assert_called_once_with("img", "flavor", 1,
                                                       instances_amount=15,
                                                       fakearg="fakearg")
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._delete_servers.assert_called_once_with(
            scenario._boot_servers.return_value, force=False)
    def test_boot_and_list_server(self):
        """Booted server must appear in the subsequent server listing.

        Covers the happy path plus two assertion failures: boot returning
        nothing, and the booted server missing from the listing.
        """
        # scenario.generate_random_name = mock.MagicMock(return_value="name")
        img_name = "img"
        flavor_uuid = 0
        details = True
        fake_server_name = mock.MagicMock()
        scenario = servers.BootAndListServer(self.context)
        scenario._boot_server = mock.MagicMock()
        scenario._list_servers = mock.MagicMock()
        scenario._list_servers.return_value = [mock.MagicMock(),
                                               fake_server_name,
                                               mock.MagicMock()]
        # Positive case
        scenario._boot_server.return_value = fake_server_name
        scenario.run(img_name, flavor_uuid, fakearg="fakearg")
        scenario._boot_server.assert_called_once_with(img_name, flavor_uuid,
                                                      fakearg="fakearg")
        scenario._list_servers.assert_called_once_with(details)
        # Negative case1: server isn't created
        scenario._boot_server.return_value = None
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          img_name, flavor_uuid, fakearg="fakearg")
        scenario._boot_server.assert_called_with(img_name, flavor_uuid,
                                                 fakearg="fakearg")
        # Negative case2: server not in the list of available servers
        scenario._boot_server.return_value = mock.MagicMock()
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          img_name, flavor_uuid, fakearg="fakearg")
        scenario._boot_server.assert_called_with(img_name, flavor_uuid,
                                                 fakearg="fakearg")
        scenario._list_servers.assert_called_with(details)
def test_suspend_and_resume_server(self):
fake_server = object()
scenario = servers.SuspendAndResumeServer(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._suspend_server = mock.MagicMock()
scenario._resume_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.run("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._suspend_server.assert_called_once_with(fake_server)
scenario._resume_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_pause_and_unpause_server(self):
fake_server = object()
scenario = servers.PauseAndUnpauseServer(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._pause_server = mock.MagicMock()
scenario._unpause_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.run("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._pause_server.assert_called_once_with(fake_server)
scenario._unpause_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_shelve_and_unshelve_server(self):
fake_server = mock.MagicMock()
scenario = servers.ShelveAndUnshelveServer(self.context)
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._shelve_server = mock.MagicMock()
scenario._unshelve_server = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.run("img", 0, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario._shelve_server.assert_called_once_with(fake_server)
scenario._unshelve_server.assert_called_once_with(fake_server)
scenario._delete_server.assert_called_once_with(fake_server,
force=False)
def test_list_servers(self):
scenario = servers.ListServers(self.context)
scenario._list_servers = mock.MagicMock()
scenario.run(True)
scenario._list_servers.assert_called_once_with(True)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_from_volume(self, mock_block_storage):
        """A bootable volume is created from the image and used as vda."""
        fake_server = object()
        scenario = servers.BootServerFromVolume(
            self.context, clients=mock.Mock())
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        fake_volume = fakes.FakeVolumeManager().create()
        fake_volume.id = "volume_id"
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario.run("img", 0, 5, volume_type=None,
                     auto_assign_nic=False, fakearg="f")
        cinder.create_volume.assert_called_once_with(5, imageRef="img",
                                                     volume_type=None)
        # image must be None: the server boots from the volume mapping
        scenario._boot_server.assert_called_once_with(
            None, 0, auto_assign_nic=False,
            block_device_mapping={"vda": "volume_id:::0"},
            fakearg="f")
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_from_volume_and_delete(self, mock_block_storage):
        """Boot from a volume, sleep within bounds, then delete the server."""
        fake_server = object()
        scenario = servers.BootServerFromVolumeAndDelete(
            self.context, clients=mock.Mock())
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        fake_volume = fakes.FakeVolumeManager().create()
        fake_volume.id = "volume_id"
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario.run("img", 0, 5, None, 10, 20, fakearg="f")
        cinder.create_volume.assert_called_once_with(5, imageRef="img",
                                                     volume_type=None)
        scenario._boot_server.assert_called_once_with(
            None, 0,
            block_device_mapping={"vda": "volume_id:::0"},
            fakearg="f")
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    def _prepare_boot(self, nic=None, assert_nic=False):
        """Build a BootServer scenario plus expected _boot_server kwargs.

        Returns (scenario, kwargs, expected_kwargs). Passing ``nic``
        implies nic assertions even if ``assert_nic`` is False.
        """
        fake_server = mock.MagicMock()
        scenario = servers.BootServer(self.context)
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        kwargs = {"fakearg": "f"}
        expected_kwargs = {"fakearg": "f"}
        # any explicit nic forces the nic expectation below
        assert_nic = nic or assert_nic
        if nic:
            kwargs["nics"] = nic
        if assert_nic:
            self.clients("nova").networks.create("net-1")
            expected_kwargs["nics"] = nic or [{"net-id": "net-2"}]
        return scenario, kwargs, expected_kwargs
    def _verify_boot_server(self, nic=None, assert_nic=False):
        """Run BootServer and check _boot_server got the prepared kwargs."""
        scenario, kwargs, expected_kwargs = self._prepare_boot(
            nic=nic, assert_nic=assert_nic)
        scenario.run("img", 0, **kwargs)
        scenario._boot_server.assert_called_once_with(
            "img", 0, auto_assign_nic=False, **expected_kwargs)
    def test_boot_server_no_nics(self):
        # boot without any nic argument
        self._verify_boot_server(nic=None, assert_nic=False)
    def test_boot_server_with_nic(self):
        # boot with an explicit nic; the nic must be forwarded verbatim
        self._verify_boot_server(nic=[{"net-id": "net-1"}], assert_nic=True)
    def test_snapshot_server(self):
        """Snapshot a booted server, boot from the snapshot, clean both up."""
        fake_server = object()
        fake_image = fakes.FakeImageManager()._create()
        fake_image.id = "image_id"
        scenario = servers.SnapshotServer(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._create_image = mock.MagicMock(return_value=fake_image)
        scenario._delete_server = mock.MagicMock()
        scenario._delete_image = mock.MagicMock()
        scenario.run("i", 0, fakearg=2)
        # first boot from the source image, second from the snapshot
        scenario._boot_server.assert_has_calls([
            mock.call("i", 0, fakearg=2),
            mock.call("image_id", 0, fakearg=2)])
        scenario._create_image.assert_called_once_with(fake_server)
        scenario._delete_server.assert_has_calls([
            mock.call(fake_server, force=False),
            mock.call(fake_server, force=False)])
        scenario._delete_image.assert_called_once_with(fake_image)
    def _test_resize(self, confirm=False):
        """Resize helper: confirm=True confirms, otherwise reverts."""
        fake_server = object()
        fake_image = fakes.FakeImageManager()._create()
        fake_image.id = "image_id"
        flavor = mock.MagicMock()
        to_flavor = mock.MagicMock()
        scenario = servers.ResizeServer(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._resize_confirm = mock.MagicMock()
        scenario._resize_revert = mock.MagicMock()
        scenario._resize = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        kwargs = {"confirm": confirm}
        scenario.run(fake_image, flavor, to_flavor, **kwargs)
        scenario._resize.assert_called_once_with(fake_server, to_flavor)
        if confirm:
            scenario._resize_confirm.assert_called_once_with(fake_server)
        else:
            scenario._resize_revert.assert_called_once_with(fake_server)
    def test_resize_with_confirm(self):
        # resize followed by confirmation
        self._test_resize(confirm=True)
    def test_resize_with_revert(self):
        # resize followed by revert
        self._test_resize(confirm=False)
    @ddt.data({"confirm": True},
              {"confirm": False})
    @ddt.unpack
    # NOTE(review): "shoutoff" looks like a typo for "shutoff"
    # (ResizeShutoffServer); renaming would change the test id, so kept.
    def test_resize_shoutoff_server(self, confirm=False):
        """Resize a stopped server; resize status must be "SHUTOFF"."""
        fake_server = object()
        flavor = mock.MagicMock()
        to_flavor = mock.MagicMock()
        scenario = servers.ResizeShutoffServer(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._stop_server = mock.MagicMock()
        scenario._resize_confirm = mock.MagicMock()
        scenario._resize_revert = mock.MagicMock()
        scenario._resize = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario.run("img", flavor, to_flavor, confirm=confirm)
        scenario._boot_server.assert_called_once_with("img", flavor)
        scenario._stop_server.assert_called_once_with(fake_server)
        scenario._resize.assert_called_once_with(fake_server, to_flavor)
        if confirm:
            scenario._resize_confirm.assert_called_once_with(fake_server,
                                                             "SHUTOFF")
        else:
            scenario._resize_revert.assert_called_once_with(fake_server,
                                                            "SHUTOFF")
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    @ddt.data({"confirm": True, "do_delete": True},
              {"confirm": False, "do_delete": True})
    @ddt.unpack
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_attach_created_volume_and_resize(
            self, mock_block_storage, confirm=False, do_delete=False):
        """Boot, attach a new volume, resize, then optionally clean up."""
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        flavor = mock.MagicMock()
        to_flavor = mock.MagicMock()
        fake_attachment = mock.MagicMock()
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario = servers.BootServerAttachCreatedVolumeAndResize(
            self.context, clients=mock.Mock())
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._attach_volume = mock.MagicMock(return_value=fake_attachment)
        scenario._resize_confirm = mock.MagicMock()
        scenario._resize_revert = mock.MagicMock()
        scenario._resize = mock.MagicMock()
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario.sleep_between = mock.MagicMock()
        volume_size = 10
        scenario.run("img", flavor, to_flavor, volume_size, min_sleep=10,
                     max_sleep=20, confirm=confirm, do_delete=do_delete)
        scenario._boot_server.assert_called_once_with("img", flavor)
        cinder.create_volume.assert_called_once_with(volume_size)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario._detach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._resize.assert_called_once_with(fake_server, to_flavor)
        if confirm:
            scenario._resize_confirm.assert_called_once_with(fake_server)
        else:
            scenario._resize_revert.assert_called_once_with(fake_server)
        if do_delete:
            scenario._detach_volume.assert_called_once_with(fake_server,
                                                            fake_volume)
            cinder.delete_volume.assert_called_once_with(fake_volume)
            scenario._delete_server.assert_called_once_with(fake_server,
                                                            force=False)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    # NOTE(review): unlike the resize test above, no @ddt.data supplies
    # do_delete=True here, so the cleanup branch below is never exercised.
    def test_boot_server_attach_created_volume_and_extend(
            self, mock_block_storage, do_delete=False):
        """Boot, attach a new volume, then extend it to the new size."""
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        flavor = mock.MagicMock()
        fake_attachment = mock.MagicMock()
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario = servers.BootServerAttachCreatedVolumeAndExtend(
            self.context, clients=mock.Mock())
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._attach_volume = mock.MagicMock(return_value=fake_attachment)
        scenario._detach_volume = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        scenario.sleep_between = mock.MagicMock()
        volume_size = 10
        new_volume_size = 20
        scenario.run("img", flavor, volume_size, new_volume_size,
                     min_sleep=10, max_sleep=20, do_delete=do_delete)
        scenario._boot_server.assert_called_once_with("img", flavor)
        cinder.create_volume.assert_called_once_with(volume_size)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario.sleep_between.assert_called_once_with(10, 20)
        cinder.extend_volume.assert_called_once_with(
            fake_volume, new_size=new_volume_size)
        if do_delete:
            scenario._detach_volume.assert_called_once_with(fake_server,
                                                            fake_volume)
            cinder.delete_volume.assert_called_once_with(fake_volume)
            scenario._delete_server.assert_called_once_with(fake_server,
                                                            force=False)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_list_attachments(self, mock_block_storage):
        """Happy path: the new attachment shows up in the attachment list."""
        mock_volume_service = mock_block_storage.return_value
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        flavor = mock.MagicMock()
        fake_attachment = mock.MagicMock()
        list_attachments = [mock.MagicMock(),
                            fake_attachment,
                            mock.MagicMock()]
        # the scenario needs admin/user credentials and tenant resources
        context = self.context
        context.update({
            "admin": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "user": {"id": "fake_user_id",
                     "credential": mock.MagicMock()},
            "tenant": {"id": "fake", "name": "fake",
                       "volumes": [{"id": "uuid", "size": 1}],
                       "servers": [1]}})
        scenario = servers.BootServerAttachVolumeAndListAttachments(
            context)
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._attach_volume = mock.MagicMock()
        scenario._list_attachments = mock.MagicMock()
        mock_volume_service.create_volume.return_value = fake_volume
        scenario._list_attachments.return_value = list_attachments
        img_name = "img"
        volume_size = 10
        volume_num = 1
        scenario._attach_volume.return_value = fake_attachment
        scenario.run(img_name, flavor, volume_size, volume_num)
        scenario._boot_server.assert_called_once_with(img_name, flavor)
        mock_volume_service.create_volume.assert_called_once_with(volume_size)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario._list_attachments.assert_called_once_with(fake_server.id)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_list_attachments_fails(self, mock_block_storage):
        """Missing attachment in the listing raises RallyAssertionError."""
        mock_volume_service = mock_block_storage.return_value
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        flavor = mock.MagicMock()
        fake_attachment = mock.MagicMock()
        # deliberately does NOT contain fake_attachment
        list_attachments = [mock.MagicMock(),
                            mock.MagicMock(),
                            mock.MagicMock()]
        context = self.context
        context.update({
            "admin": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "user": {"id": "fake_user_id",
                     "credential": mock.MagicMock()},
            "tenant": {"id": "fake", "name": "fake",
                       "volumes": [{"id": "uuid", "size": 1}],
                       "servers": [1]}})
        scenario = servers.BootServerAttachVolumeAndListAttachments(
            context)
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        mock_volume_service.create_volume.return_value = fake_volume
        scenario._attach_volume = mock.MagicMock()
        scenario._list_attachments = mock.MagicMock()
        scenario._attach_volume.return_value = fake_attachment
        scenario._list_attachments.return_value = list_attachments
        img_name = "img"
        volume_size = 10
        # Negative case: attachment not included into list of
        # available attachments
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          img_name, flavor, volume_size)
        scenario._boot_server.assert_called_with(img_name, flavor)
        mock_volume_service.create_volume.assert_called_with(volume_size)
        scenario._attach_volume.assert_called_with(fake_server,
                                                   fake_volume)
        scenario._list_attachments.assert_called_with(fake_server.id)
    @ddt.data({"confirm": True, "do_delete": True},
              {"confirm": False, "do_delete": True})
    @ddt.unpack
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_from_volume_and_resize(
            self, mock_block_storage, confirm=False, do_delete=False):
        """Boot from an image-backed volume, resize, optionally delete."""
        fake_server = object()
        flavor = mock.MagicMock()
        to_flavor = mock.MagicMock()
        scenario = servers.BootServerFromVolumeAndResize(self.context,
                                                         clients=mock.Mock())
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._resize_confirm = mock.MagicMock()
        scenario._resize_revert = mock.MagicMock()
        scenario._resize = mock.MagicMock()
        scenario.sleep_between = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        fake_volume = fakes.FakeVolumeManager().create()
        fake_volume.id = "volume_id"
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        volume_size = 10
        scenario.run("img", flavor, to_flavor, volume_size, min_sleep=10,
                     max_sleep=20, confirm=confirm, do_delete=do_delete)
        cinder.create_volume.assert_called_once_with(10, imageRef="img")
        scenario._boot_server.assert_called_once_with(
            None, flavor,
            block_device_mapping={"vda": "volume_id:::0"})
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._resize.assert_called_once_with(fake_server, to_flavor)
        if confirm:
            scenario._resize_confirm.assert_called_once_with(fake_server)
        else:
            scenario._resize_revert.assert_called_once_with(fake_server)
        if do_delete:
            scenario._delete_server.assert_called_once_with(fake_server,
                                                            force=False)
def test_boot_and_live_migrate_server(self):
fake_server = mock.MagicMock()
scenario = servers.BootAndLiveMigrateServer(self.context)
scenario.generate_random_name = mock.MagicMock(return_value="name")
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario.sleep_between = mock.MagicMock()
scenario._live_migrate = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario.run("img", 0, min_sleep=10, max_sleep=20, fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", 0,
fakearg="fakearg")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._live_migrate.assert_called_once_with(fake_server,
False, False)
scenario._delete_server.assert_called_once_with(fake_server)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_from_volume_and_live_migrate(self,
                                                      mock_block_storage):
        """Boot from an image-backed volume, live-migrate, then delete."""
        fake_server = mock.MagicMock()
        scenario = servers.BootServerFromVolumeAndLiveMigrate(
            self.context, clients=mock.Mock())
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario.sleep_between = mock.MagicMock()
        scenario._live_migrate = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        fake_volume = fakes.FakeVolumeManager().create()
        fake_volume.id = "volume_id"
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario.run("img", 0, 5, volume_type=None,
                     min_sleep=10, max_sleep=20, fakearg="f")
        cinder.create_volume.assert_called_once_with(5, imageRef="img",
                                                     volume_type=None)
        scenario._boot_server.assert_called_once_with(
            None, 0,
            block_device_mapping={"vda": "volume_id:::0"},
            fakearg="f")
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._live_migrate.assert_called_once_with(fake_server,
                                                       False, False)
        scenario._delete_server.assert_called_once_with(fake_server,
                                                        force=False)
    @mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
    def test_boot_server_attach_created_volume_and_live_migrate(
            self, mock_block_storage):
        """Boot, attach a volume, live-migrate, then clean everything up."""
        fake_volume = mock.MagicMock()
        fake_server = mock.MagicMock()
        fake_attachment = mock.MagicMock()
        clients = mock.Mock()
        cinder = mock_block_storage.return_value
        cinder.create_volume.return_value = fake_volume
        scenario = servers.BootServerAttachCreatedVolumeAndLiveMigrate(
            self.context, clients=clients)
        scenario._attach_volume = mock.MagicMock(return_value=fake_attachment)
        scenario._detach_volume = mock.MagicMock()
        scenario.sleep_between = mock.MagicMock()
        scenario._live_migrate = mock.MagicMock()
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._delete_server = mock.MagicMock()
        image = "img"
        flavor = "flavor"
        size = 5
        # extra boot kwargs must be forwarded verbatim
        boot_kwargs = {"some_var": "asd"}
        scenario.run(image, flavor, size, min_sleep=10, max_sleep=20,
                     boot_server_kwargs=boot_kwargs)
        scenario._boot_server.assert_called_once_with(image, flavor,
                                                      **boot_kwargs)
        cinder.create_volume.assert_called_once_with(size)
        scenario._attach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario._detach_volume.assert_called_once_with(fake_server,
                                                        fake_volume)
        scenario.sleep_between.assert_called_once_with(10, 20)
        scenario._live_migrate.assert_called_once_with(fake_server,
                                                       False, False)
        cinder.delete_volume.assert_called_once_with(fake_volume)
        scenario._delete_server.assert_called_once_with(fake_server)
    def _test_boot_and_migrate_server(self, confirm=False):
        """Cold-migration helper: confirm or revert with status "ACTIVE".

        NOTE(review): the assertion below expects ``confirm`` to be passed
        through to ``_boot_server`` along with the other kwargs.
        """
        fake_server = mock.MagicMock()
        scenario = servers.BootAndMigrateServer(self.context)
        scenario.generate_random_name = mock.MagicMock(return_value="name")
        scenario._boot_server = mock.MagicMock(return_value=fake_server)
        scenario._migrate = mock.MagicMock()
        scenario._resize_confirm = mock.MagicMock()
        scenario._resize_revert = mock.MagicMock()
        scenario._delete_server = mock.MagicMock()
        kwargs = {"confirm": confirm}
        scenario.run("img", 0, fakearg="fakearg", **kwargs)
        scenario._boot_server.assert_called_once_with("img", 0,
                                                      fakearg="fakearg",
                                                      confirm=confirm)
        scenario._migrate.assert_called_once_with(fake_server)
        if confirm:
            scenario._resize_confirm.assert_called_once_with(fake_server,
                                                             status="ACTIVE")
        else:
            scenario._resize_revert.assert_called_once_with(fake_server,
                                                            status="ACTIVE")
        scenario._delete_server.assert_called_once_with(fake_server)
    def test_boot_and_migrate_server_with_confirm(self):
        # cold migration confirmed
        self._test_boot_and_migrate_server(confirm=True)
    def test_boot_and_migrate_server_with_revert(self):
        # cold migration reverted
        self._test_boot_and_migrate_server(confirm=False)
def test_boot_and_rebuild_server(self):
scenario = servers.BootAndRebuildServer(self.context)
scenario._boot_server = mock.Mock()
scenario._rebuild_server = mock.Mock()
scenario._delete_server = mock.Mock()
from_image = "img1"
to_image = "img2"
flavor = "flavor"
scenario.run(from_image, to_image, flavor, fakearg="fakearg")
scenario._boot_server.assert_called_once_with(from_image, flavor,
fakearg="fakearg")
server = scenario._boot_server.return_value
scenario._rebuild_server.assert_called_once_with(server, to_image)
scenario._delete_server.assert_called_once_with(server)
def test_boot_and_show_server(self):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
scenario = servers.BootAndShowServer(self.context)
scenario._boot_server = mock.MagicMock(return_value=server)
scenario._show_server = mock.MagicMock()
scenario.run(image, flavor, fakearg="fakearg")
scenario._boot_server.assert_called_once_with(image, flavor,
fakearg="fakearg")
scenario._show_server.assert_called_once_with(server)
def test_boot_server_and_list_interfaces(self):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
scenario = servers.BootServerAndListInterfaces(self.context)
scenario._boot_server = mock.MagicMock(return_value=server)
scenario._list_interfaces = mock.MagicMock()
scenario.run(image, flavor, fakearg="fakearg")
scenario._boot_server.assert_called_once_with(image, flavor,
fakearg="fakearg")
scenario._list_interfaces.assert_called_once_with(server)
    @ddt.data({"length": None},
              {"length": 10})
    @ddt.unpack
    def test_boot_and_get_console_server(self, length):
        """Console output of the booted server is fetched with ``length``."""
        server = fakes.FakeServer()
        image = fakes.FakeImage()
        flavor = fakes.FakeFlavor()
        kwargs = {"fakearg": "fakearg"}
        scenario = servers.BootAndGetConsoleOutput(self.context)
        scenario._boot_server = mock.MagicMock(return_value=server)
        scenario._get_server_console_output = mock.MagicMock()
        scenario.run(image, flavor, length, **kwargs)
        scenario._boot_server.assert_called_once_with(image, flavor,
                                                      **kwargs)
        scenario._get_server_console_output.assert_called_once_with(server,
                                                                    length)
def test_boot_and_get_console_url(self):
server = fakes.FakeServer()
image = fakes.FakeImage()
flavor = fakes.FakeFlavor()
kwargs = {"fakearg": "fakearg"}
scenario = servers.BootAndGetConsoleUrl(self.context)
scenario._boot_server = mock.MagicMock(return_value=server)
scenario._get_console_url_server = mock.MagicMock()
scenario.run(image, flavor, console_type="novnc", **kwargs)
scenario._boot_server.assert_called_once_with(image, flavor,
**kwargs)
scenario._get_console_url_server.assert_called_once_with(
server, "novnc")
    def test_boot_and_associate_floating_ip(self):
        """Boot a server and associate a Neutron floating IP with it.

        Also verifies how the external network is resolved when the new
        ``floating_network`` argument and the deprecated
        ``create_floating_ip_args`` forms are combined (cases 1-5 below).
        """
        clients = mock.MagicMock(credential=mock.MagicMock(api_info={}))
        neutronclient = clients.neutron.return_value
        floatingip = "floatingip"
        neutronclient.create_floatingip.return_value = {
            "floatingip": floatingip}
        scenario = servers.BootAndAssociateFloatingIp(self.context,
                                                      clients=clients)
        server = mock.Mock()
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._associate_floating_ip = mock.Mock()
        image = "img"
        flavor = "flavor"
        scenario.run(image, flavor, fakearg="fakearg")
        scenario._boot_server.assert_called_once_with(image, flavor,
                                                      fakearg="fakearg")
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": mock.ANY}
        )
        scenario._associate_floating_ip.assert_called_once_with(
            server, floatingip)
        # check ext_network
        neutronclient.list_networks.return_value = {
            "networks": [
                {"id": "id1", "name": "net1", "router:external": True},
                {"id": "id2", "name": "net2", "router:external": True},
                {"id": "id3", "name": "net3", "router:external": True},
            ]
        }
        neutronclient.create_floatingip.reset_mock()
        # case 1: new argument is used
        scenario.run(image, flavor, floating_network="net3")
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 2: new argument is transmitted with an old one
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor, floating_network="net3",
                     create_floating_ip_args={"ext_network": "net2"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 3: new argument is transmitted with an semi-old one
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor, floating_network="net3",
                     create_floating_ip_args={"floating_network": "net1"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 4: only old argument is transmitted
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor,
                     create_floating_ip_args={"ext_network": "net2"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id2"}}
        )
        # case 5: only semi-old argument is transmitted
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor,
                     create_floating_ip_args={"floating_network": "net1"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id1"}}
        )
    def test_boot_server_associate_and_dissociate_floating_ip(self):
        """Boot, associate a floating IP, then dissociate it again.

        Mirrors test_boot_and_associate_floating_ip, additionally checking
        the dissociate call and the same five network-resolution cases.
        """
        clients = mock.MagicMock(credential=mock.MagicMock(api_info={}))
        neutronclient = clients.neutron.return_value
        floatingip = "floatingip"
        neutronclient.create_floatingip.return_value = {
            "floatingip": floatingip}
        scenario = servers.BootServerAssociateAndDissociateFloatingIP(
            self.context, clients=clients)
        server = mock.Mock()
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._associate_floating_ip = mock.Mock()
        scenario._dissociate_floating_ip = mock.Mock()
        image = "img"
        flavor = "flavor"
        scenario.run(image, flavor, fakearg="fakearg")
        scenario._boot_server.assert_called_once_with(image, flavor,
                                                      fakearg="fakearg")
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": mock.ANY}
        )
        scenario._associate_floating_ip.assert_called_once_with(
            server, floatingip)
        scenario._dissociate_floating_ip.assert_called_once_with(
            server, floatingip)
        # check ext_network
        neutronclient.list_networks.return_value = {
            "networks": [
                {"id": "id1", "name": "net1", "router:external": True},
                {"id": "id2", "name": "net2", "router:external": True},
                {"id": "id3", "name": "net3", "router:external": True},
            ]
        }
        neutronclient.create_floatingip.reset_mock()
        # case 1: new argument is used
        scenario.run(image, flavor, floating_network="net3")
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 2: new argument is transmitted with an old one
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor, floating_network="net3",
                     create_floating_ip_args={"ext_network": "net2"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 3: new argument is transmitted with an semi-old one
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor, floating_network="net3",
                     create_floating_ip_args={"floating_network": "net1"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id3"}}
        )
        # case 4: only old argument is transmitted
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor,
                     create_floating_ip_args={"ext_network": "net2"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id2"}}
        )
        # case 5: only semi-old argument is transmitted
        neutronclient.create_floatingip.reset_mock()
        scenario.run(image, flavor,
                     create_floating_ip_args={"floating_network": "net1"})
        neutronclient.create_floatingip.assert_called_once_with(
            {"floatingip": {"description": mock.ANY,
                            "floating_network_id": "id1"}}
        )
def test_boot_and_update_server(self):
scenario = servers.BootAndUpdateServer(self.context)
scenario._boot_server = mock.Mock()
scenario._update_server = mock.Mock()
scenario.run("img", "flavor", "desp", fakearg="fakearg")
scenario._boot_server.assert_called_once_with("img", "flavor",
fakearg="fakearg")
scenario._update_server.assert_called_once_with(
scenario._boot_server.return_value, "desp")
    def test_boot_server_and_attach_interface(self):
        """Network and subnet are prepared, then an interface is attached."""
        network_create_args = {"router:external": True}
        subnet_create_args = {"allocation_pools": []}
        subnet_cidr_start = "10.1.0.0/16"
        boot_server_args = {}
        net = mock.MagicMock()
        subnet = mock.MagicMock()
        server = mock.MagicMock()
        scenario = servers.BootServerAndAttachInterface(self.context)
        scenario._get_or_create_network = mock.Mock(return_value=net)
        scenario._create_subnet = mock.Mock(return_value=subnet)
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._attach_interface = mock.Mock()
        scenario.run("image", "flavor",
                     network_create_args=network_create_args,
                     subnet_create_args=subnet_create_args,
                     subnet_cidr_start=subnet_cidr_start,
                     boot_server_args=boot_server_args)
        scenario._get_or_create_network.assert_called_once_with(
            network_create_args)
        scenario._create_subnet.assert_called_once_with(net,
                                                        subnet_create_args,
                                                        subnet_cidr_start)
        scenario._boot_server.assert_called_once_with("image", "flavor",
                                                      **boot_server_args)
        scenario._attach_interface.assert_called_once_with(
            server, net_id=net["network"]["id"])
@mock.patch("rally_openstack.common.services.storage.block.BlockStorage")
def test_boot_server_from_volume_snapshot(self, mock_block_storage):
fake_volume = mock.MagicMock(id="volume_id")
fake_snapshot = mock.MagicMock(id="snapshot_id")
cinder = mock_block_storage.return_value
cinder.create_volume.return_value = fake_volume
cinder.create_snapshot.return_value = fake_snapshot
scenario = servers.BootServerFromVolumeSnapshot(self.context,
clients=mock.Mock())
scenario._boot_server = mock.MagicMock()
scenario.run("img", "flavor", 1, volume_type=None,
auto_assign_nic=False, fakearg="f")
cinder.create_volume.assert_called_once_with(1, imageRef="img",
volume_type=None)
cinder.create_snapshot.assert_called_once_with("volume_id",
force=False)
scenario._boot_server.assert_called_once_with(
None, "flavor", auto_assign_nic=False,
block_device_mapping={"vda": "snapshot_id:snap::1"},
fakearg="f")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,719
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/nova/test_server_groups.py
|
# Copyright 2017: Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally import exceptions as rally_exceptions
from rally_openstack.task.scenarios.nova import server_groups
from tests.unit import test
SERVER_GROUPS_MODULE = "rally_openstack.task.scenarios.nova.server_groups"
NOVA_SERVER_GROUPS = SERVER_GROUPS_MODULE + ".NovaServerGroups"
@ddt.ddt
class NovaServerGroupsTestCase(test.ScenarioTestCase):
    """Unit tests for the Nova server-group scenarios.

    Each test replaces the scenario's atomic helpers with mocks and checks
    both the happy path and the RallyAssertionError raised when the created
    server group cannot be confirmed afterwards.
    """

    def test_create_and_list_server_groups(self):
        """Create a server group and verify it appears in the listing."""
        scenario = server_groups.CreateAndListServerGroups(self.context)
        fake_server_group = mock.MagicMock()
        all_projects = False
        scenario._create_server_group = mock.MagicMock()
        scenario._list_server_groups = mock.MagicMock()
        # The created group is placed in the middle of the listing so the
        # scenario's membership check is exercised non-trivially.
        scenario._list_server_groups.return_value = [mock.MagicMock(),
                                                     fake_server_group,
                                                     mock.MagicMock()]
        # Positive case and kwargs is None
        scenario._create_server_group.return_value = fake_server_group
        scenario.run(policies="fake_policy", all_projects=False, kwargs=None)
        kwargs = {
            "policies": "fake_policy"
        }
        scenario._create_server_group.assert_called_once_with(**kwargs)
        scenario._list_server_groups.assert_called_once_with(all_projects)
        # Positive case and kwargs is not None
        foo_kwargs = {
            "policies": "fake_policy"
        }
        scenario._create_server_group.return_value = fake_server_group
        scenario.run(policies=None, all_projects=False,
                     kwargs=foo_kwargs)
        scenario._create_server_group.assert_called_with(**foo_kwargs)
        scenario._list_server_groups.assert_called_with(all_projects)
        # Negative case1: server group isn't created
        scenario._create_server_group.return_value = None
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          **kwargs)
        scenario._create_server_group.assert_called_with(**kwargs)
        # Negative case2: server group not in the list of available server
        # groups
        scenario._create_server_group.return_value = mock.MagicMock()
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          **kwargs)
        scenario._create_server_group.assert_called_with(**kwargs)
        scenario._list_server_groups.assert_called_with(all_projects)

    def test_create_and_get_server_group_positive(self):
        """Create a server group and fetch the same group by id."""
        scenario = server_groups.CreateAndGetServerGroup(self.context)
        fake_server_group = mock.MagicMock()
        fake_server_group_info = mock.MagicMock()
        # Matching ids make the scenario's create/get consistency check pass.
        fake_server_group.id = 123
        fake_server_group_info.id = 123
        scenario._create_server_group = mock.MagicMock()
        scenario._get_server_group = mock.MagicMock()
        # Positive case and kwargs is None
        kwargs = {
            "policies": "fake_policy"
        }
        scenario._create_server_group.return_value = fake_server_group
        scenario._get_server_group.return_value = fake_server_group_info
        scenario.run(policies="fake_policy", kwargs=None)
        scenario._create_server_group.assert_called_once_with(**kwargs)
        scenario._get_server_group.assert_called_once_with(
            fake_server_group.id)
        # Positive case and kwargs is not None
        scenario._create_server_group.return_value = fake_server_group
        scenario._get_server_group.return_value = fake_server_group_info
        foo_kwargs = {
            "policies": "fake_policy"
        }
        scenario.run(policies=None, kwargs=foo_kwargs)
        scenario._create_server_group.assert_called_with(**foo_kwargs)
        scenario._get_server_group.assert_called_with(
            fake_server_group.id)

    def test_create_and_get_server_group_negative(self):
        """Fail when creation returns nothing or get returns another group."""
        scenario = server_groups.CreateAndGetServerGroup(self.context)
        fake_server_group = mock.MagicMock()
        fake_server_group_info = mock.MagicMock()
        fake_server_group.id = 123
        fake_server_group_info.id = 123
        kwargs = {
            "policies": "fake_policy"
        }
        scenario._create_server_group = mock.MagicMock()
        scenario._get_server_group = mock.MagicMock()
        # Negative case1: server group isn't created
        scenario._create_server_group.return_value = None
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          **kwargs)
        scenario._create_server_group.assert_called_with(**kwargs)
        # Negative case2: server group to get information not the created one
        fake_server_group_info.id = 456
        scenario._create_server_group.return_value = fake_server_group
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          **kwargs)
        scenario._create_server_group.assert_called_with(**kwargs)
        scenario._get_server_group.assert_called_with(
            fake_server_group.id)

    def test_create_and_delete_server_group(self):
        """Create a server group, then delete it by id."""
        scenario = server_groups.CreateAndDeleteServerGroup(self.context)
        fake_server_group = mock.MagicMock()
        scenario._create_server_group = mock.MagicMock()
        scenario._delete_server_group = mock.MagicMock()
        # Positive case and kwargs is None
        kwargs = {
            "policies": "fake_policy"
        }
        scenario._create_server_group.return_value = fake_server_group
        scenario.run(policies="fake_policy", kwargs=None)
        scenario._create_server_group.assert_called_once_with(**kwargs)
        scenario._delete_server_group.assert_called_once_with(
            fake_server_group.id)
        # Positive case and kwargs is not None
        scenario._create_server_group.return_value = fake_server_group
        foo_kwargs = {
            "policies": "fake_policy"
        }
        scenario.run(policies=None, kwargs=foo_kwargs)
        scenario._create_server_group.assert_called_with(**foo_kwargs)
        scenario._delete_server_group.assert_called_with(
            fake_server_group.id)
        # Negative case: server group isn't created
        scenario._create_server_group.return_value = None
        self.assertRaises(rally_exceptions.RallyAssertionError,
                          scenario.run,
                          **kwargs)
        scenario._create_server_group.assert_called_with(**kwargs)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,720
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/swift/test_objects.py
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.task.scenarios.swift import objects
from tests.unit import test
@ddt.ddt
class SwiftObjectsTestCase(test.ScenarioTestCase):
    """Unit tests for the Swift object scenarios.

    The "functional" tests at the bottom run the scenarios against the base
    class's fake clients, stubbing only name generation and the asserted
    helpers.
    """

    def test_create_container_and_object_then_list_objects(self):
        """Create one container, upload N objects, list them."""
        scenario = objects.CreateContainerAndObjectThenListObjects(
            self.context)
        scenario._create_container = mock.MagicMock(return_value="AA")
        scenario._upload_object = mock.MagicMock()
        scenario._list_objects = mock.MagicMock()
        scenario.run(objects_per_container=5, object_size=100)
        self.assertEqual(1, scenario._create_container.call_count)
        self.assertEqual(5, scenario._upload_object.call_count)
        scenario._list_objects.assert_called_once_with("AA")

    def test_create_container_and_object_then_delete_all(self):
        """Upload objects, then delete each object and the container."""
        scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context)
        scenario._create_container = mock.MagicMock(return_value="BB")
        # _upload_object returns (etag, object_name) pairs.
        scenario._upload_object = mock.MagicMock(
            side_effect=[("etaaag", "ooobj_%i" % i) for i in range(3)])
        scenario._delete_object = mock.MagicMock()
        scenario._delete_container = mock.MagicMock()
        scenario.run(objects_per_container=3, object_size=10)
        self.assertEqual(1, scenario._create_container.call_count)
        self.assertEqual(3, scenario._upload_object.call_count)
        scenario._delete_object.assert_has_calls(
            [mock.call("BB", "ooobj_%i" % i) for i in range(3)])
        scenario._delete_container.assert_called_once_with("BB")

    def test_create_container_and_object_then_download_object(self):
        """Upload objects, then download each one back."""
        scenario = objects.CreateContainerAndObjectThenDownloadObject(
            self.context
        )
        scenario._create_container = mock.MagicMock(return_value="CC")
        scenario._upload_object = mock.MagicMock(
            side_effect=[("etaaaag", "obbbj_%i" % i) for i in range(2)])
        scenario._download_object = mock.MagicMock()
        scenario.run(objects_per_container=2, object_size=50)
        self.assertEqual(1, scenario._create_container.call_count)
        self.assertEqual(2, scenario._upload_object.call_count)
        scenario._download_object.assert_has_calls(
            [mock.call("CC", "obbbj_%i" % i) for i in range(2)])

    @ddt.data(1, 5)
    def test_list_objects_in_containers(self, num_cons):
        """List objects for every container returned by _list_containers."""
        con_list = [{"name": "cooon_%s" % i} for i in range(num_cons)]
        scenario = objects.ListObjectsInContainers(self.context)
        # _list_containers returns (headers, container_list).
        scenario._list_containers = mock.MagicMock(return_value=("header",
                                                                 con_list))
        scenario._list_objects = mock.MagicMock()
        scenario.run()
        scenario._list_containers.assert_called_once_with()
        con_calls = [mock.call(container["name"])
                     for container in con_list]
        scenario._list_objects.assert_has_calls(con_calls)

    @ddt.data([1, 1], [1, 2], [2, 1], [3, 5])
    @ddt.unpack
    def test_list_and_download_objects_in_containers(self, num_cons, num_objs):
        """Download every object of every container (cross product)."""
        con_list = [{"name": "connn_%s" % i} for i in range(num_cons)]
        obj_list = [{"name": "ooobj_%s" % i} for i in range(num_objs)]
        scenario = objects.ListAndDownloadObjectsInContainers(self.context)
        scenario._list_containers = mock.MagicMock(return_value=("header",
                                                                 con_list))
        scenario._list_objects = mock.MagicMock(return_value=("header",
                                                              obj_list))
        scenario._download_object = mock.MagicMock()
        scenario.run()
        scenario._list_containers.assert_called_once_with()
        con_calls = [mock.call(container["name"])
                     for container in con_list]
        scenario._list_objects.assert_has_calls(con_calls)
        obj_calls = []
        for container in con_list:
            for obj in obj_list:
                obj_calls.append(mock.call(container["name"], obj["name"]))
        scenario._download_object.assert_has_calls(obj_calls, any_order=True)

    def test_functional_create_container_and_object_then_list_objects(self):
        """End-to-end run: first generated name is the container's."""
        names_list = ["AA", "BB", "CC", "DD"]
        scenario = objects.CreateContainerAndObjectThenListObjects(
            self.context)
        scenario.generate_random_name = mock.MagicMock(side_effect=names_list)
        scenario._list_objects = mock.MagicMock()
        scenario.run(objects_per_container=3, object_size=100)
        scenario._list_objects.assert_called_once_with("AA")

    def test_functional_create_container_and_object_then_delete_all(self):
        """End-to-end run: objects (names after the first) are deleted."""
        names_list = ["111", "222", "333", "444", "555"]
        scenario = objects.CreateContainerAndObjectThenDeleteAll(self.context)
        scenario.generate_random_name = mock.MagicMock(side_effect=names_list)
        scenario._delete_object = mock.MagicMock()
        scenario._delete_container = mock.MagicMock()
        scenario.run(objects_per_container=4, object_size=240)
        scenario._delete_object.assert_has_calls(
            [mock.call("111", name) for name in names_list[1:]])
        scenario._delete_container.assert_called_once_with("111")

    def test_functional_create_container_and_object_then_download_object(self):
        """End-to-end run: objects (names after the first) are downloaded."""
        names_list = ["aaa", "bbb", "ccc", "ddd", "eee", "fff"]
        scenario = objects.CreateContainerAndObjectThenDownloadObject(
            self.context)
        scenario.generate_random_name = mock.MagicMock(side_effect=names_list)
        scenario._download_object = mock.MagicMock()
        scenario.run(objects_per_container=5, object_size=750)
        scenario._download_object.assert_has_calls(
            [mock.call("aaa", name) for name in names_list[1:]])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,721
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/gnocchi/test_utils.py
|
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.gnocchi import utils
from tests.unit import test
class GnocchiBaseTestCase(test.ScenarioTestCase):
    """Tests for the common Gnocchi scenario base class."""

    def setUp(self):
        """Build a context with admin/user credentials and mock out
        GnocchiService so no real client is ever constructed.
        """
        super(GnocchiBaseTestCase, self).setUp()
        self.context = super(GnocchiBaseTestCase, self).get_test_context()
        self.context.update({
            "admin": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "user": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant_id",
                       "name": "fake_tenant_name"}
        })
        patch = mock.patch(
            "rally_openstack.common.services.gnocchi.metric.GnocchiService")
        self.mock_service = patch.start()
        # Register the cleanup only after start() succeeds: calling stop()
        # on a patcher that was never started raises RuntimeError during
        # teardown and masks the original failure.
        self.addCleanup(patch.stop)

    def test__gnocchi_base(self):
        """Both admin and user handles point at the mocked GnocchiService."""
        base = utils.GnocchiBase(self.context)
        self.assertEqual(base.admin_gnocchi,
                         self.mock_service.return_value)
        self.assertEqual(base.gnocchi,
                         self.mock_service.return_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,722
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/identity/keystone_v3.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally_openstack.common import service
from rally_openstack.common.services.identity import identity
from rally_openstack.common.services.identity import keystone_common
LOG = logging.getLogger(__name__)
@service.service("keystone", service_type="identity", version="3")
class KeystoneV3Service(service.Service, keystone_common.KeystoneMixin):
    """Keystone v3 implementation of the identity service."""

    def _get_domain_id(self, domain_name_or_id):
        """Resolve a domain name or id to the domain id.

        :param domain_name_or_id: id or name of the domain to look up
        :raises GetResourceNotFound: if no domain matches by id or by name
        """
        from keystoneclient import exceptions as kc_exceptions
        try:
            # First try to find domain by ID
            return self._clients.keystone("3").domains.get(
                domain_name_or_id).id
        except kc_exceptions.NotFound:
            # Domain not found by ID, try to find it by name
            domains = self._clients.keystone("3").domains.list(
                name=domain_name_or_id)
            if domains:
                return domains[0].id
            # Domain not found by name
            raise exceptions.GetResourceNotFound(
                resource="KeystoneDomain(%s)" % domain_name_or_id)

    @atomic.action_timer("keystone_v3.create_project")
    def create_project(self, project_name=None, domain_name="Default"):
        """Create a project in the given domain.

        :param project_name: name of project (random name when omitted)
        :param domain_name: name or id of the domain to create the project in
        """
        project_name = project_name or self.generate_random_name()
        domain_id = self._get_domain_id(domain_name)
        return self._clients.keystone("3").projects.create(name=project_name,
                                                           domain=domain_id)

    @atomic.action_timer("keystone_v3.update_project")
    def update_project(self, project_id, name=None, enabled=None,
                       description=None):
        """Update tenant name and description.

        :param project_id: Id of project to update
        :param name: project name to be set (if boolean True, random name will
            be set)
        :param enabled: enabled status of project
        :param description: project description to be set (if boolean True,
            random description will be set)
        """
        if name is True:
            name = self.generate_random_name()
        if description is True:
            description = self.generate_random_name()
        self._clients.keystone("3").projects.update(
            project_id, name=name, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.delete_project")
    def delete_project(self, project_id):
        """Delete the project with the given id."""
        self._clients.keystone("3").projects.delete(project_id)

    @atomic.action_timer("keystone_v3.list_projects")
    def list_projects(self):
        """List all projects."""
        return self._clients.keystone("3").projects.list()

    @atomic.action_timer("keystone_v3.get_project")
    def get_project(self, project_id):
        """Get project."""
        return self._clients.keystone("3").projects.get(project_id)

    @atomic.action_timer("keystone_v3.create_user")
    def create_user(self, username=None, password=None, project_id=None,
                    domain_name="Default", enabled=True,
                    default_role="member"):
        """Create user.

        :param username: name of user
        :param password: user password
        :param project_id: user's default project
        :param domain_name: Name or id of domain where to create project.
        :param enabled: whether the user is enabled.
        :param default_role: user's default role
        """
        domain_id = self._get_domain_id(domain_name)
        username = username or self.generate_random_name()
        user = self._clients.keystone("3").users.create(
            name=username, password=password, default_project=project_id,
            domain=domain_id, enabled=enabled)
        if project_id:
            # we can't setup role without project_id
            roles = self.list_roles()
            # First pass: exact case-insensitive match of the role name.
            for role in roles:
                if default_role == role.name.lower():
                    self.add_role(role_id=role.id,
                                  user_id=user.id,
                                  project_id=project_id)
                    return user
            # Second pass: match names with surrounding underscores stripped
            # (e.g. a "_member_"-style role name).
            for role in roles:
                if default_role == role.name.lower().strip("_"):
                    self.add_role(role_id=role.id,
                                  user_id=user.id,
                                  project_id=project_id)
                    return user
            LOG.warning("Unable to set %s role to created user." %
                        default_role)
        return user

    @atomic.action_timer("keystone_v3.create_users")
    def create_users(self, project_id, number_of_users, user_create_args=None):
        """Create specified amount of users.

        :param project_id: Id of project
        :param number_of_users: number of users to create
        :param user_create_args: additional user creation arguments
        """
        users = []
        for _i in range(number_of_users):
            users.append(self.create_user(project_id=project_id,
                                          **(user_create_args or {})))
        return users

    @atomic.action_timer("keystone_v3.update_user")
    def update_user(self, user_id, name=None, domain_name=None,
                    project_id=None, password=None, email=None,
                    description=None, enabled=None, default_project=None):
        """Update attributes of an existing user.

        Parameters left as None are passed through unchanged to the client.
        """
        domain = None
        if domain_name:
            domain = self._get_domain_id(domain_name)
        self._clients.keystone("3").users.update(
            user_id, name=name, domain=domain, project=project_id,
            password=password, email=email, description=description,
            enabled=enabled, default_project=default_project)

    @atomic.action_timer("keystone_v3.create_service")
    def create_service(self, name=None, service_type=None, description=None,
                       enabled=True):
        """Creates keystone service.

        :param name: name of service to create
        :param service_type: type of the service
        :param description: description of the service
        :param enabled: whether the service appears in the catalog
        :returns: keystone service instance
        """
        name = name or self.generate_random_name()
        service_type = service_type or "rally_test_type"
        description = description or self.generate_random_name()
        return self._clients.keystone("3").services.create(
            name, type=service_type, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.create_role")
    def create_role(self, name=None, domain_name=None):
        """Create a role, optionally scoped to a domain.

        :param name: role name (random name when omitted)
        :param domain_name: name or id of the domain for the role
        """
        domain_id = None
        if domain_name:
            domain_id = self._get_domain_id(domain_name)
        name = name or self.generate_random_name()
        return self._clients.keystone("3").roles.create(name, domain=domain_id)

    @atomic.action_timer("keystone_v3.add_role")
    def add_role(self, role_id, user_id, project_id):
        """Grant a role to a user on a project."""
        self._clients.keystone("3").roles.grant(role=role_id,
                                                user=user_id,
                                                project=project_id)

    @atomic.action_timer("keystone_v3.list_roles")
    def list_roles(self, user_id=None, project_id=None, domain_name=None):
        """List all roles."""
        domain_id = None
        if domain_name:
            domain_id = self._get_domain_id(domain_name)
        return self._clients.keystone("3").roles.list(user=user_id,
                                                      project=project_id,
                                                      domain=domain_id)

    @atomic.action_timer("keystone_v3.revoke_role")
    def revoke_role(self, role_id, user_id, project_id):
        """Revoke a role from a user on a project."""
        self._clients.keystone("3").roles.revoke(role=role_id,
                                                 user=user_id,
                                                 project=project_id)

    @atomic.action_timer("keystone_v3.create_domain")
    def create_domain(self, name, description=None, enabled=True):
        """Create a keystone domain."""
        return self._clients.keystone("3").domains.create(
            name, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.create_ec2creds")
    def create_ec2credentials(self, user_id, project_id):
        """Create ec2credentials.

        :param user_id: User ID for which to create credentials
        :param project_id: Tenant ID for which to create credentials
        :returns: Created ec2-credentials object
        """
        return self._clients.keystone("3").ec2.create(user_id,
                                                      project_id=project_id)
@service.compat_layer(KeystoneV3Service)
class UnifiedKeystoneV3Service(keystone_common.UnifiedKeystoneMixin,
                               identity.Identity):
    """Compatibility layer exposing keystone v3 through the unified
    identity interface.
    """

    @staticmethod
    def _unify_project(project):
        """Convert a keystone v3 project into the unified Project tuple."""
        return identity.Project(id=project.id, name=project.name,
                                domain_id=project.domain_id)

    @staticmethod
    def _unify_user(user):
        """Convert a keystone v3 user into the unified User tuple."""
        # When user has default_project_id that is None user.default_project_id
        # will raise AttributeError
        project_id = getattr(user, "project_id",
                             getattr(user, "default_project_id", None))
        return identity.User(id=user.id, name=user.name, project_id=project_id,
                             domain_id=user.domain_id)

    def create_project(self, project_name=None, domain_name="Default"):
        """Creates new project/tenant and return project object.

        :param project_name: Name of project to be created.
        :param domain_name: Name or id of domain where to create project,
        """
        project = self._impl.create_project(project_name,
                                            domain_name=domain_name)
        return self._unify_project(project)

    def update_project(self, project_id, name=None, enabled=None,
                       description=None):
        """Update project name, enabled and description

        :param project_id: Id of project to update
        :param name: project name to be set
        :param enabled: enabled status of project
        :param description: project description to be set
        """
        self._impl.update_project(project_id=project_id, name=name,
                                  enabled=enabled, description=description)

    def delete_project(self, project_id):
        """Deletes project."""
        return self._impl.delete_project(project_id)

    def list_projects(self):
        """List all projects."""
        return [self._unify_project(p) for p in self._impl.list_projects()]

    def get_project(self, project_id):
        """Get project."""
        return self._unify_project(self._impl.get_project(project_id))

    def create_user(self, username=None, password=None, project_id=None,
                    domain_name="Default", enabled=True,
                    default_role="member"):
        """Create user.

        :param username: name of user
        :param password: user password
        :param project_id: user's default project
        :param domain_name: Name or id of domain where to create project,
        :param enabled: whether the user is enabled.
        :param default_role: Name of default user's role
        """
        return self._unify_user(self._impl.create_user(
            username=username, password=password, project_id=project_id,
            domain_name=domain_name, default_role=default_role,
            enabled=enabled))

    def create_users(self, project_id, number_of_users, user_create_args=None):
        """Create specified amount of users.

        :param project_id: Id of project
        :param number_of_users: number of users to create
        :param user_create_args: additional user creation arguments
        """
        return [self._unify_user(u)
                for u in self._impl.create_users(
                    project_id=project_id, number_of_users=number_of_users,
                    user_create_args=user_create_args)]

    def list_users(self):
        """List all users."""
        return [self._unify_user(u) for u in self._impl.list_users()]

    def update_user(self, user_id, enabled=None, name=None, email=None,
                    password=None):
        """Update an existing user's attributes."""
        return self._impl.update_user(user_id, enabled=enabled, name=name,
                                      email=email, password=password)

    def list_services(self):
        """List all services."""
        return [self._unify_service(s) for s in self._impl.list_services()]

    def create_role(self, name=None, domain_name=None):
        """Add role to user."""
        return self._unify_role(self._impl.create_role(
            name, domain_name=domain_name))

    def add_role(self, role_id, user_id, project_id):
        """Add role to user."""
        self._impl.add_role(role_id=role_id, user_id=user_id,
                            project_id=project_id)

    def revoke_role(self, role_id, user_id, project_id):
        """Revokes a role from a user."""
        return self._impl.revoke_role(role_id=role_id, user_id=user_id,
                                      project_id=project_id)

    def list_roles(self, user_id=None, project_id=None, domain_name=None):
        """List all roles."""
        return [self._unify_role(role) for role in self._impl.list_roles(
            user_id=user_id, project_id=project_id, domain_name=domain_name)]

    def create_ec2credentials(self, user_id, project_id):
        """Create ec2credentials.

        :param user_id: User ID for which to create credentials
        :param project_id: Project ID for which to create credentials
        :returns: Created ec2-credentials object
        """
        return self._impl.create_ec2credentials(user_id=user_id,
                                                project_id=project_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,723
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/test__compat.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
from tests.unit import test
class CompatibilityTestCase(test.TestCase):
    """Checks the deprecated import paths kept for backward compatibility."""

    def test_old_imports_work(self):
        """Importing the old module path works but warns about deprecation."""
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter("always")
            from rally_openstack import osclients
            if not recorded:
                self.fail("`rally_openstack._compat` should raise a warning.")

        self.assertEqual(1, len(recorded))
        caught = recorded[0]
        # The recorded warning's `message` attribute is an exception
        # instance, so compare its string form.
        self.assertEqual(
            "Module rally_openstack.osclients is deprecated since "
            "rally-openstack 2.0.0. Use rally_openstack.common.osclients "
            "instead.",
            str(caught.message))

        from rally_openstack.common import osclients as right_osclients

        # The compat module must re-export exactly the same public names.
        expected = {name for name in dir(right_osclients)
                    if not name.startswith("_")}
        actual = {name for name in dir(osclients)
                  if not name.startswith("_")}
        self.assertEqual(expected, actual)
        self.assertEqual(right_osclients.Clients, osclients.Clients)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,724
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/test_workarounds.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
rally-openstack package should not be aligned to one constant version of Rally
framework. It means that some workarounds for compatibility stuff are
provided.
This module should contain historical notes and checks to do not forget remove
these workaround.
"""
import pkg_resources
from tests.unit import test
class WorkaroundTestCase(test.TestCase):
    """Fails as soon as a recorded workaround becomes redundant because the
    minimum required Rally version has been bumped past it.
    """

    # Entries are (version, [workaround descriptions]) pairs.
    WORKAROUNDS = []

    def get_min_required_version(self):
        """Return the minimum Rally version required by this package as a
        list of ints, or skip the test if it cannot be determined.
        """
        package = pkg_resources.get_distribution("rally-openstack")
        rally_req = [r for r in package.requires() if r.name == "rally"][0]
        for op, raw_version in rally_req.specs:
            parts = [int(chunk) for chunk in raw_version.split(".")]
            if op == ">=":
                return parts
            if op == ">":
                # Exclusive lower bound: the next patch level is the first
                # version that satisfies it.
                parts[-1] += 1
                return parts
        self.skipTest("Failed to get a minimum required version of Rally "
                      "framework.")

    def test_rally_version(self):
        min_version = self.get_min_required_version()
        for version, workarounds in self.WORKAROUNDS:
            if min_version >= version:
                self.fail(
                    "After bumping minimum required version of Rally, some "
                    "workarounds become redundant. See the following list and "
                    "update the code: \n\t%s" % "\n\t".join(workarounds))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,725
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/mistral/test_utils.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.task.scenarios.mistral import utils
from tests.unit import fakes
from tests.unit import test
# Dotted path of the module under test (usable with mock.patch).
MISTRAL_UTILS = "rally_openstack.task.scenarios.mistral.utils"
# Sample execution parameters and workflow input used by the tests below.
PARAMS_EXAMPLE = {"env": {"env_param": "param_value"}}
INPUT_EXAMPLE = """{"input1": "value1", "some_json_input": {"a": "b"}}"""
class MistralScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the MistralScenario atomic actions.

    Each test drives one ``_<action>`` helper against the mocked mistral
    client and verifies both the returned value and that the matching
    atomic-action timer was recorded.
    """

    def test_list_workbooks(self):
        scenario = utils.MistralScenario(context=self.context)
        return_wbs_list = scenario._list_workbooks()
        self.assertEqual(
            self.clients("mistral").workbooks.list.return_value,
            return_wbs_list)
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.list_workbooks"
        )

    def test_create_workbook(self):
        definition = "version: \"2.0\"\nname: wb"
        scenario = utils.MistralScenario(context=self.context)
        self.assertEqual(
            self.clients("mistral").workbooks.create.return_value,
            scenario._create_workbook(definition)
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.create_workbook"
        )

    def test_delete_workbook(self):
        scenario = utils.MistralScenario(context=self.context)
        scenario._delete_workbook("wb_name")
        self.clients("mistral").workbooks.delete.assert_called_once_with(
            "wb_name",
            namespace=""
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.delete_workbook"
        )

    def test_list_executions(self):
        scenario = utils.MistralScenario(context=self.context)
        return_executions_list = scenario._list_executions()
        self.assertEqual(
            return_executions_list,
            self.clients("mistral").executions.list.return_value
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.list_executions"
        )

    def test_create_execution(self):
        scenario = utils.MistralScenario(context=self.context)
        namespace = "namespace"
        wf_name = "fake_wf_name"
        mock_wait_for_status = self.mock_wait_for_status.mock
        mock_create_exec = self.clients("mistral").executions.create
        # NOTE: wf_name is passed directly (the old "%s" % wf_name was a
        # no-op format of an already-str value).
        self.assertEqual(
            mock_wait_for_status.return_value,
            scenario._create_execution(wf_name, namespace=namespace)
        )
        mock_create_exec.assert_called_once_with(
            wf_name,
            workflow_input=None,
            namespace=namespace
        )
        args, kwargs = mock_wait_for_status.call_args
        self.assertEqual(mock_create_exec.return_value, args[0])
        self.assertEqual(["ERROR"], kwargs["failure_statuses"])
        self.assertEqual(["SUCCESS"], kwargs["ready_statuses"])
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.create_execution"
        )

    def test_create_execution_with_input(self):
        scenario = utils.MistralScenario(context=self.context)
        mock_wait_for_status = self.mock_wait_for_status.mock
        wf_name = "fake_wf_name"
        mock_create_exec = self.clients("mistral").executions.create
        # INPUT_EXAMPLE is already a str; the old str() wrapper was a no-op.
        self.assertEqual(
            mock_wait_for_status.return_value,
            scenario._create_execution(
                wf_name, wf_input=INPUT_EXAMPLE)
        )
        mock_create_exec.assert_called_once_with(
            wf_name,
            workflow_input=INPUT_EXAMPLE,
            namespace=""
        )

    def test_create_execution_with_params(self):
        scenario = utils.MistralScenario(context=self.context)
        mock_wait_for_status = self.mock_wait_for_status.mock
        wf_name = "fake_wf_name"
        mock_create_exec = self.clients("mistral").executions.create
        self.assertEqual(
            mock_wait_for_status.return_value,
            scenario._create_execution(
                wf_name, **PARAMS_EXAMPLE)
        )
        mock_create_exec.assert_called_once_with(
            wf_name,
            workflow_input=None,
            namespace="",
            **PARAMS_EXAMPLE
        )
        # (A copy-pasted duplicate of the following assertion block was
        # removed; asserting the same things twice added no coverage.)
        args, kwargs = mock_wait_for_status.call_args
        self.assertEqual(mock_create_exec.return_value, args[0])
        self.assertEqual(["ERROR"], kwargs["failure_statuses"])
        self.assertEqual(["SUCCESS"], kwargs["ready_statuses"])
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.create_execution"
        )

    def test_delete_execution(self):
        scenario = utils.MistralScenario(context=self.context)
        execution = fakes.FakeMistralClient().execution.create()
        scenario._delete_execution(execution)
        self.clients("mistral").executions.delete.assert_called_once_with(
            execution.id
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.delete_execution"
        )

    def test_create_workflow(self):
        scenario = utils.MistralScenario(context=self.context)
        definition = """
        wf:
          type: direct
          tasks:
            task1:
              action: std.noop
        """
        self.assertEqual(
            self.clients("mistral").workflows.create.return_value,
            scenario._create_workflow(definition)
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.create_workflow"
        )

    def test_delete_workflow(self):
        wf_identifier = "wf_identifier"
        namespace = "delete_wf_test"
        scenario = utils.MistralScenario(context=self.context)
        scenario._delete_workflow(wf_identifier, namespace=namespace)
        self.clients("mistral").workflows.delete.assert_called_once_with(
            wf_identifier,
            namespace=namespace
        )
        self._test_atomic_action_timer(
            scenario.atomic_actions(),
            "mistral.delete_workflow"
        )
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,726
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/neutron/utils.py
|
# Copyright 2014: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.common.services.network import neutron
from rally_openstack.task import scenario
# Module-level handles for rally config options and logging.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NeutronBaseScenario(scenario.OpenStackScenario):
    """Base class for Neutron scenarios with basic atomic actions."""

    def __init__(self, *args, **kwargs):
        super(NeutronBaseScenario, self).__init__(*args, **kwargs)
        # Wire up a NeutronService helper for each credential set the
        # scenario actually received (user and/or admin).
        for client_attr, service_attr in (("_clients", "neutron"),
                                          ("_admin_clients", "admin_neutron")):
            if hasattr(self, client_attr):
                setattr(self, service_attr, neutron.NeutronService(
                    clients=getattr(self, client_attr),
                    name_generator=self.generate_random_name,
                    atomic_inst=self.atomic_actions()
                ))

    def _get_or_create_network(self, **network_create_args):
        """Get a network from context, or create a new one.

        This lets users either create networks with the 'network'
        context, provide existing networks with the 'existing_network'
        context, or let the scenario create a default network for
        them.
        """
        tenant = self.context["tenant"]
        if "networks" not in tenant:
            LOG.warning("Running this scenario without either the "
                        "'network@openstack' or 'existing_network@openstack' "
                        "context is deprecated since Rally-OpenStack 2.0.0.")
            return self.neutron.create_network(**network_create_args)
        # Round-robin over the tenant networks, keyed on iteration number.
        nets = tenant["networks"]
        return nets[self.context["iteration"] % len(nets)]
class NeutronScenario(NeutronBaseScenario):
    """Base class for Neutron scenarios with basic atomic actions."""

    # TODO(rkiran): modify in case LBaaS-v2 requires
    # Defaults used by the legacy LBaaS v1 helpers below.
    LB_METHOD = "ROUND_ROBIN"
    LB_PROTOCOL = "HTTP"
    LB_PROTOCOL_PORT = 80
    # Defaults for the LBaaS v1 health-monitor helpers below.
    HM_TYPE = "PING"
    HM_MAX_RETRIES = 3
    HM_DELAY = 20
    HM_TIMEOUT = 10
    def _get_network_id(self, network, **kwargs):
        """Get Neutron network ID for the network name.

        :param network: str, network name/id
        :param kwargs: dict, network options (not used by the lookup itself)
        :returns: str, Neutron network-id
        :raises NotFoundException: if no such network can be found
        """
        try:
            return self.neutron.find_network(network)["id"]
        except exceptions.GetResourceFailure:
            # Re-raise as NotFoundException to keep this helper's
            # historical error contract for callers.
            raise exceptions.NotFoundException(
                message="Network %s not found." % network)

    @property
    def _ext_gw_mode_enabled(self):
        """Determine if the ext-gw-mode extension is enabled.

        Without this extension, we can't pass the enable_snat parameter.
        """
        # silent=True: report False instead of raising when unsupported.
        return self.neutron.supports_extension("ext-gw-mode", silent=True)

    def _create_network(self, network_create_args):
        """Create neutron network.

        :param network_create_args: dict, POST /v2.0/networks request options
        :returns: neutron network dict
        """
        # Any user-supplied name is dropped; the service generates one.
        network_create_args.pop("name", None)
        return {"network": self.neutron.create_network(**network_create_args)}

    def _list_networks(self, **kwargs):
        """Return user networks list.

        :param kwargs: network list options
        """
        return self.neutron.list_networks(**kwargs)

    def _list_agents(self, **kwargs):
        """Fetches agents.

        :param kwargs: neutron agent list options
        :returns: user agents list
        """
        return self.neutron.list_agents(**kwargs)

    def _update_network(self, network, network_update_args):
        """Update the network.

        This atomic function updates the network with network_update_args.

        :param network: Network object ({"network": {...}} wrapper)
        :param network_update_args: dict, POST /v2.0/networks update options
        :returns: updated neutron network dict
        """
        # Updates always rename the resource with a rally-generated name.
        network_update_args["name"] = self.generate_random_name()
        return {"network": self.neutron.update_network(
            network["network"]["id"], **network_update_args)}

    def _show_network(self, network, **kwargs):
        """show network details.

        :param network: Network object ({"network": {...}} wrapper)
        :param kwargs: dict, POST /v2.0/networks show options
        :returns: details of the network
        """
        network = self.neutron.get_network(network["network"]["id"], **kwargs)
        return {"network": network}

    def _delete_network(self, network):
        """Delete neutron network.

        :param network: Network object
        """
        # NOTE(review): unlike the sibling delete helpers this reads
        # network["id"] directly, i.e. callers apparently pass the inner
        # network dict rather than the {"network": ...} wrapper -- confirm
        # against call sites before changing.
        self.neutron.delete_network(network["id"])
    def _create_subnet(self, network, subnet_create_args, start_cidr=None):
        """Create neutron subnet.

        :param network: neutron network dict ({"network": {...}} wrapper)
        :param subnet_create_args: POST /v2.0/subnets request options
        :param start_cidr: str, start value for the subnet CIDR
        :returns: neutron subnet dict
        """
        # Any user-supplied name is dropped; the service generates one.
        subnet_create_args.pop("name", None)
        subnet_create_args["network_id"] = network["network"]["id"]
        subnet_create_args["start_cidr"] = start_cidr
        return {"subnet": self.neutron.create_subnet(**subnet_create_args)}

    def _list_subnets(self):
        """Returns user subnetworks list."""
        return self.neutron.list_subnets()

    def _show_subnet(self, subnet, **kwargs):
        """show subnet details.

        :param subnet: Subnet object
        :param kwargs: Optional additional arguments for subnet show
        :returns: details of the subnet
        """
        # NOTE(review): kwargs are accepted but never forwarded to
        # get_subnet -- confirm whether this is intentional.
        return {"subnet": self.neutron.get_subnet(subnet["subnet"]["id"])}

    def _update_subnet(self, subnet, subnet_update_args):
        """Update the neutron subnet.

        This atomic function updates the subnet with subnet_update_args.

        :param subnet: Subnet object
        :param subnet_update_args: dict, PUT /v2.0/subnets update options
        :returns: updated neutron subnet dict
        """
        # Updates always rename the resource with a rally-generated name.
        subnet_update_args["name"] = self.generate_random_name()
        return {"subnet": self.neutron.update_subnet(
            subnet["subnet"]["id"], **subnet_update_args)}

    def _delete_subnet(self, subnet):
        """Delete neutron subnet

        :param subnet: Subnet object
        """
        self.neutron.delete_subnet(subnet["subnet"]["id"])
    def _create_router(self, router_create_args, external_gw=False):
        """Create neutron router.

        :param router_create_args: POST /v2.0/routers request options
        :param external_gw: bool, whether to discover and attach an
                            external gateway network
        :returns: neutron router dict
        """
        # Any user-supplied name is dropped; the service generates one.
        router_create_args.pop("name", None)
        # Accept the legacy "tenant_id" key but send it as "project_id".
        if ("tenant_id" in router_create_args
                and "project_id" not in router_create_args):
            router_create_args["project_id"] = router_create_args.pop(
                "tenant_id")
        return {"router": self.neutron.create_router(
            discover_external_gw=external_gw, **router_create_args)}

    def _list_routers(self):
        """Returns user routers list."""
        return self.neutron.list_routers()

    def _show_router(self, router, **kwargs):
        """Show information of a given router.

        :param router: Router object ({"router": {...}} wrapper)
        :param kwargs: dict, POST /v2.0/routers show options
        :return: details of the router
        """
        return {"router": self.neutron.get_router(
            router["router"]["id"], **kwargs)}

    def _delete_router(self, router):
        """Delete neutron router

        :param router: Router object ({"router": {...}} wrapper)
        """
        self.neutron.delete_router(router["router"]["id"])

    def _update_router(self, router, router_update_args):
        """Update the neutron router.

        This atomic function updates the router with router_update_args.

        :param router: dict, neutron router ({"router": {...}} wrapper)
        :param router_update_args: dict, PUT /v2.0/routers update options
        :returns: updated neutron router dict
        """
        # Updates always rename the resource with a rally-generated name.
        router_update_args["name"] = self.generate_random_name()
        return {"router": self.neutron.update_router(
            router["router"]["id"], **router_update_args)}
    def _create_port(self, network, port_create_args):
        """Create neutron port.

        :param network: neutron network dict ({"network": {...}} wrapper)
        :param port_create_args: POST /v2.0/ports request options
        :returns: neutron port dict
        """
        return {"port": self.neutron.create_port(
            network_id=network["network"]["id"], **port_create_args)}

    def _list_ports(self):
        """Return user ports list."""
        return self.neutron.list_ports()

    def _show_port(self, port, **params):
        """Return user port details.

        :param port: dict, neutron port ({"port": {...}} wrapper)
        :param params: neutron port show options
        :returns: neutron port dict
        """
        return {"port": self.neutron.get_port(port["port"]["id"], **params)}

    def _update_port(self, port, port_update_args):
        """Update the neutron port.

        This atomic function updates port with port_update_args.

        :param port: dict, neutron port ({"port": {...}} wrapper)
        :param port_update_args: dict, PUT /v2.0/ports update options
        :returns: updated neutron port dict
        """
        # Updates always rename the resource with a rally-generated name.
        port_update_args["name"] = self.generate_random_name()
        return {"port": self.neutron.update_port(port["port"]["id"],
                                                 **port_update_args)}

    def _delete_port(self, port):
        """Delete neutron port.

        :param port: Port object ({"port": {...}} wrapper)
        """
        self.neutron.delete_port(port["port"]["id"])
    @logging.log_deprecated_args(
        "network_create_args is deprecated; use the network context instead",
        "0.1.0", "network_create_args")
    def _get_or_create_network(self, network_create_args=None):
        """Get a network from context, or create a new one.

        This lets users either create networks with the 'network'
        context, provide existing networks with the 'existing_network'
        context, or let the scenario create a default network for
        them. Running this without one of the network contexts is
        deprecated.

        :param network_create_args: Deprecated way to provide network
                                    creation args; use the network
                                    context instead.
        :returns: Network dict
        """
        # NOTE(review): this overrides
        # NeutronBaseScenario._get_or_create_network with a different
        # signature and return shape ({"network": ...} wrapper, random
        # choice instead of iteration-based round-robin).
        if "networks" in self.context["tenant"]:
            return {"network":
                    random.choice(self.context["tenant"]["networks"])}
        else:
            LOG.warning("Running this scenario without either the 'network' "
                        "or 'existing_network' context is deprecated")
            return self._create_network(network_create_args or {})

    def _create_subnets(self, network,
                        subnet_create_args=None,
                        subnet_cidr_start=None,
                        subnets_per_network=1):
        """Create <count> new subnets in the given network.

        :param network: network to create subnets in
        :param subnet_create_args: dict, POST /v2.0/subnets request options
        :param subnet_cidr_start: str, start value for subnets CIDR
        :param subnets_per_network: int, number of subnets for one network
        :returns: List of subnet dicts
        """
        return [self._create_subnet(network, subnet_create_args or {},
                                    subnet_cidr_start)
                for i in range(subnets_per_network)]
    def _create_network_and_subnets(self,
                                    network_create_args=None,
                                    subnet_create_args=None,
                                    subnets_per_network=1,
                                    subnet_cidr_start="1.0.0.0/24"):
        """Create network and subnets.

        :param network_create_args: dict, POST /v2.0/networks request options
        :param subnet_create_args: dict, POST /v2.0/subnets request options
        :param subnets_per_network: int, number of subnets for one network
        :param subnet_cidr_start: str, start value for subnets CIDR
        :returns: tuple of result network and subnets list
        """
        # Copy before mutating so the caller's dict is left untouched.
        subnet_create_args = dict(subnet_create_args or {})
        subnet_create_args["start_cidr"] = subnet_cidr_start
        net_topo = self.neutron.create_network_topology(
            network_create_args=(network_create_args or {}),
            subnet_create_args=subnet_create_args,
            subnets_count=subnets_per_network
        )
        # Re-wrap each subnet to keep the historical return format.
        subnets = [{"subnet": s} for s in net_topo["subnets"]]
        return {"network": net_topo["network"]}, subnets

    def _create_network_structure(self, network_create_args=None,
                                  subnet_create_args=None,
                                  subnet_cidr_start=None,
                                  subnets_per_network=None,
                                  router_create_args=None):
        """Create a network and a given number of subnets and routers.

        :param network_create_args: dict, POST /v2.0/networks request options
        :param subnet_create_args: dict, POST /v2.0/subnets request options
        :param subnet_cidr_start: str, start value for subnets CIDR
        :param subnets_per_network: int, number of subnets for one network
        :param router_create_args: dict, POST /v2.0/routers request options
        :returns: tuple of (network, subnets, routers)
        """
        # Copy before mutating so the caller's dict is left untouched.
        subnet_create_args = dict(subnet_create_args or {})
        subnet_create_args["start_cidr"] = subnet_cidr_start
        net_topo = self.neutron.create_network_topology(
            network_create_args=(network_create_args or {}),
            router_create_args=(router_create_args or {}),
            router_per_subnet=True,
            subnet_create_args=subnet_create_args,
            subnets_count=subnets_per_network
        )
        # Re-wrap resources to keep the historical return format.
        return ({"network": net_topo["network"]},
                [{"subnet": s} for s in net_topo["subnets"]],
                [{"router": r} for r in net_topo["routers"]])
    def _add_interface_router(self, subnet, router):
        """Connect subnet to router.

        :param subnet: dict, neutron subnet (inner dict, not the wrapper)
        :param router: dict, neutron router (inner dict, not the wrapper)
        """
        # NOTE(review): unlike _add_gateway_router below, this reads
        # router["id"]/subnet["id"] directly -- callers apparently pass
        # unwrapped dicts here; confirm against call sites.
        self.neutron.add_interface_to_router(router_id=router["id"],
                                             subnet_id=subnet["id"])

    def _remove_interface_router(self, subnet, router):
        """Remove subnet from router

        :param subnet: dict, neutron subnet (inner dict, not the wrapper)
        :param router: dict, neutron router (inner dict, not the wrapper)
        """
        self.neutron.remove_interface_from_router(
            router_id=router["id"], subnet_id=subnet["id"])

    def _add_gateway_router(self, router, ext_net, enable_snat=None):
        """Set the external network gateway for a router.

        :param router: dict, neutron router ({"router": {...}} wrapper)
        :param ext_net: external network for the gateway
                        ({"network": {...}} wrapper)
        :param enable_snat: True if enable snat, None to avoid update
        """
        self.neutron.add_gateway_to_router(
            router_id=router["router"]["id"],
            network_id=ext_net["network"]["id"],
            enable_snat=enable_snat
        )

    def _remove_gateway_router(self, router):
        """Removes an external network gateway from the specified router.

        :param router: dict, neutron router ({"router": {...}} wrapper)
        """
        self.neutron.remove_gateway_from_router(router["router"]["id"])
    # The *_v1_* helpers below talk to the legacy neutron LBaaS v1 API
    # through the raw python-neutronclient rather than NeutronService.

    @atomic.action_timer("neutron.create_pool")
    def _create_lb_pool(self, subnet_id, **pool_create_args):
        """Create LB pool(v1)

        :param subnet_id: str, neutron subnet-id
        :param pool_create_args: dict, POST /lb/pools request options
        :returns: dict, neutron lb pool
        """
        # Defaults come from the class-level LB_* constants; user-supplied
        # options win.
        args = {"lb_method": self.LB_METHOD,
                "protocol": self.LB_PROTOCOL,
                "name": self.generate_random_name(),
                "subnet_id": subnet_id}
        args.update(pool_create_args)
        return self.clients("neutron").create_pool({"pool": args})

    def _create_v1_pools(self, networks, **pool_create_args):
        """Create LB pools(v1)

        One pool is created per subnet of the given networks.

        :param networks: list, neutron networks
        :param pool_create_args: dict, POST /lb/pools request options
        :returns: list, neutron lb pools
        """
        subnets = []
        pools = []
        for net in networks:
            subnets.extend(net.get("subnets", []))
        for subnet_id in subnets:
            pools.append(self._create_lb_pool(
                subnet_id, **pool_create_args))
        return pools

    @atomic.action_timer("neutron.list_pools")
    def _list_v1_pools(self, **kwargs):
        """Return user lb pool list(v1)."""
        return self.clients("neutron").list_pools(**kwargs)

    @atomic.action_timer("neutron.delete_pool")
    def _delete_v1_pool(self, pool):
        """Delete neutron pool.

        :param pool: Pool object (inner dict with "id")
        """
        self.clients("neutron").delete_pool(pool["id"])

    @atomic.action_timer("neutron.update_pool")
    def _update_v1_pool(self, pool, **pool_update_args):
        """Update pool.

        This atomic function updates the pool with pool_update_args.

        :param pool: Pool object ({"pool": {...}} wrapper)
        :param pool_update_args: dict, POST /lb/pools update options
        :returns: updated neutron pool dict
        """
        # Updates always rename the resource with a rally-generated name.
        pool_update_args["name"] = self.generate_random_name()
        body = {"pool": pool_update_args}
        return self.clients("neutron").update_pool(pool["pool"]["id"], body)

    def _create_v1_vip(self, pool, **vip_create_args):
        """Create VIP(v1)

        :param pool: dict, neutron lb-pool ({"pool": {...}} wrapper)
        :param vip_create_args: dict, POST /lb/vips request options
        :returns: dict, neutron lb vip
        """
        # The VIP inherits its subnet from the pool it fronts.
        args = {"protocol": self.LB_PROTOCOL,
                "protocol_port": self.LB_PROTOCOL_PORT,
                "name": self.generate_random_name(),
                "pool_id": pool["pool"]["id"],
                "subnet_id": pool["pool"]["subnet_id"]}
        args.update(vip_create_args)
        return self.clients("neutron").create_vip({"vip": args})

    @atomic.action_timer("neutron.list_vips")
    def _list_v1_vips(self, **kwargs):
        """Return user lb vip list(v1)."""
        return self.clients("neutron").list_vips(**kwargs)

    @atomic.action_timer("neutron.delete_vip")
    def _delete_v1_vip(self, vip):
        """Delete neutron vip.

        :param vip: neutron Virtual IP object (inner dict with "id")
        """
        self.clients("neutron").delete_vip(vip["id"])

    @atomic.action_timer("neutron.update_vip")
    def _update_v1_vip(self, vip, **vip_update_args):
        """Updates vip.

        This atomic function updates vip name and admin state

        :param vip: Vip object ({"vip": {...}} wrapper)
        :param vip_update_args: dict, POST /lb/vips update options
        :returns: updated neutron vip dict
        """
        # Updates always rename the resource with a rally-generated name.
        vip_update_args["name"] = self.generate_random_name()
        body = {"vip": vip_update_args}
        return self.clients("neutron").update_vip(vip["vip"]["id"], body)
    def _create_floatingip(self, floating_network, **floating_ip_args):
        """Create floating IP with floating_network.

        :param floating_network: str, external network to create floating IP
        :param floating_ip_args: dict, POST /floatingips create options
        :returns: dict, neutron floating IP
        """
        return {"floatingip": self.neutron.create_floatingip(
            floating_network=floating_network, **floating_ip_args)}

    def _list_floating_ips(self, **kwargs):
        """Return floating IPs list."""
        return {"floatingips": self.neutron.list_floatingips(**kwargs)}

    def _delete_floating_ip(self, floating_ip):
        """Delete floating IP.

        :param floating_ip: dict, floating IP object (inner dict with "id")
        """
        return self.neutron.delete_floatingip(floating_ip["id"])

    def _associate_floating_ip(self, floatingip, port):
        """Associate floating IP with port.

        :param floatingip: floating IP dict
        :param port: port dict
        :returns: updated floating IP dict
        """
        return self.neutron.associate_floatingip(
            port_id=port["id"],
            floatingip_id=floatingip["id"])

    def _dissociate_floating_ip(self, floatingip):
        """Dissociate floating IP from ports.

        :param floatingip: floating IP dict
        :returns: updated floating IP dict
        """
        return self.neutron.dissociate_floatingip(
            floatingip_id=floatingip["id"])
    @atomic.action_timer("neutron.create_healthmonitor")
    def _create_v1_healthmonitor(self, **healthmonitor_create_args):
        """Create LB healthmonitor.

        This atomic function creates healthmonitor with the provided
        healthmonitor_create_args.

        :param healthmonitor_create_args: dict, POST /lb/healthmonitors
        :returns: neutron healthmonitor dict
        """
        # Defaults come from the class-level HM_* constants; user-supplied
        # options win.
        args = {"type": self.HM_TYPE,
                "delay": self.HM_DELAY,
                "max_retries": self.HM_MAX_RETRIES,
                "timeout": self.HM_TIMEOUT}
        args.update(healthmonitor_create_args)
        return self.clients("neutron").create_health_monitor(
            {"health_monitor": args})

    @atomic.action_timer("neutron.list_healthmonitors")
    def _list_v1_healthmonitors(self, **kwargs):
        """List LB healthmonitors.

        This atomic function lists all healthmonitors.

        :param kwargs: optional parameters
        :returns: neutron lb healthmonitor list
        """
        return self.clients("neutron").list_health_monitors(**kwargs)

    @atomic.action_timer("neutron.delete_healthmonitor")
    def _delete_v1_healthmonitor(self, healthmonitor):
        """Delete neutron healthmonitor.

        :param healthmonitor: neutron healthmonitor dict
                              (inner dict with "id")
        """
        self.clients("neutron").delete_health_monitor(healthmonitor["id"])

    @atomic.action_timer("neutron.update_healthmonitor")
    def _update_v1_healthmonitor(self, healthmonitor,
                                 **healthmonitor_update_args):
        """Update neutron healthmonitor.

        :param healthmonitor: neutron lb healthmonitor dict
                              ({"health_monitor": {...}} wrapper)
        :param healthmonitor_update_args: POST /lb/healthmonitors
                                          update options
        :returns: updated neutron lb healthmonitor dict
        """
        body = {"health_monitor": healthmonitor_update_args}
        return self.clients("neutron").update_health_monitor(
            healthmonitor["health_monitor"]["id"], body)
    def _create_security_group(self, **security_group_create_args):
        """Create Neutron security-group.

        :param security_group_create_args: dict, POST /v2.0/security-groups
                                           request options
        :returns: dict, neutron security-group
        """
        # The name is always a rally-generated one.
        security_group_create_args["name"] = self.generate_random_name()
        return {"security_group": self.neutron.create_security_group(
            **security_group_create_args)}

    def _delete_security_group(self, security_group):
        """Delete Neutron security group.

        :param security_group: dict, neutron security_group
                               ({"security_group": {...}} wrapper)
        """
        return self.neutron.delete_security_group(
            security_group["security_group"]["id"])

    def _list_security_groups(self, **kwargs):
        """Return list of Neutron security groups."""
        return {"security_groups": self.neutron.list_security_groups(**kwargs)}

    def _show_security_group(self, security_group, **kwargs):
        """Show security group details.

        :param security_group: dict, neutron security_group
                               ({"security_group": {...}} wrapper)
        :param kwargs: Optional additional arguments for security_group show
        :returns: security_group details
        """
        return {"security_group": self.neutron.get_security_group(
            security_group["security_group"]["id"], **kwargs)}

    def _update_security_group(self, security_group,
                               **security_group_update_args):
        """Update Neutron security-group.

        :param security_group: dict, neutron security_group
                               ({"security_group": {...}} wrapper)
        :param security_group_update_args: dict, POST /v2.0/security-groups
                                           update options
        :returns: dict, updated neutron security-group
        """
        # Updates always rename the resource with a rally-generated name.
        security_group_update_args["name"] = self.generate_random_name()
        return {"security_group": self.neutron.update_security_group(
            security_group["security_group"]["id"],
            **security_group_update_args)}
    def update_loadbalancer_resource(self, lb):
        """Refresh an LBaaS v2 loadbalancer dict from the API.

        Used as the ``update_resource`` callback for wait_for_status
        in _create_lbaasv2_loadbalancer.

        :param lb: dict, loadbalancer (must contain "id")
        :returns: fresh loadbalancer dict
        :raises GetResourceNotFound: if the loadbalancer is gone (HTTP 404)
        :raises GetResourceFailure: on any other client error
        """
        try:
            new_lb = self.clients("neutron").show_loadbalancer(lb["id"])
        except Exception as e:
            # A 404 means the resource vanished; anything else (including
            # exceptions without a status_code attribute) is a failure.
            if getattr(e, "status_code", 400) == 404:
                raise exceptions.GetResourceNotFound(resource=lb)
            raise exceptions.GetResourceFailure(resource=lb, err=e)
        return new_lb["loadbalancer"]

    @atomic.action_timer("neutron.create_lbaasv2_loadbalancer")
    def _create_lbaasv2_loadbalancer(self, subnet_id, **lb_create_args):
        """Create LB loadbalancer(v2)

        Blocks until the loadbalancer's provisioning_status reaches
        ACTIVE or the configured timeout expires.

        :param subnet_id: str, neutron subnet-id
        :param lb_create_args: dict, POST /lbaas/loadbalancers request options
        :returns: dict, neutron lb
        """
        args = {"name": self.generate_random_name(),
                "vip_subnet_id": subnet_id}
        args.update(lb_create_args)
        neutronclient = self.clients("neutron")
        lb = neutronclient.create_loadbalancer({"loadbalancer": args})
        lb = lb["loadbalancer"]
        lb = utils.wait_for_status(
            lb,
            ready_statuses=["ACTIVE"],
            status_attr="provisioning_status",
            update_resource=self.update_loadbalancer_resource,
            timeout=CONF.openstack.neutron_create_loadbalancer_timeout,
            check_interval=(
                CONF.openstack.neutron_create_loadbalancer_poll_interval)
        )
        return lb

    @atomic.action_timer("neutron.list_lbaasv2_loadbalancers")
    def _list_lbaasv2_loadbalancers(self, retrieve_all=True, **lb_list_args):
        """List LB loadbalancers(v2)

        :param retrieve_all: bool, whether to fetch all pages
        :param lb_list_args: dict, POST /lbaas/loadbalancers request options
        :returns: dict, neutron lb loadbalancers(v2)
        """
        return self.clients("neutron").list_loadbalancers(retrieve_all,
                                                          **lb_list_args)
    # BGP VPN CRUD below uses admin clients, while the network/router
    # association helpers use the regular user clients.

    @atomic.action_timer("neutron.create_bgpvpn")
    def _create_bgpvpn(self, **kwargs):
        """Create Bgpvpn resource (POST /bgpvpn/bgpvpn)

        :param kwargs: optional parameters to create BGP VPN
        :returns: dict, bgpvpn resource details
        """
        # The name is always a rally-generated one.
        kwargs["name"] = self.generate_random_name()
        return self.admin_clients("neutron").create_bgpvpn({"bgpvpn": kwargs})

    @atomic.action_timer("neutron.delete_bgpvpn")
    def _delete_bgpvpn(self, bgpvpn):
        """Delete Bgpvpn resource.(DELETE /bgpvpn/bgpvpns/{id})

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :returns: dict, bgpvpn
        """
        return self.admin_clients("neutron").delete_bgpvpn(
            bgpvpn["bgpvpn"]["id"])

    @atomic.action_timer("neutron.list_bgpvpns")
    def _list_bgpvpns(self, **kwargs):
        """Return bgpvpns list.

        :param kwargs: dict, GET /bgpvpn/bgpvpns request options
        :returns: bgpvpns list
        """
        # The positional True is the client's retrieve_all flag.
        return self.admin_clients("neutron").list_bgpvpns(
            True, **kwargs)["bgpvpns"]

    @atomic.action_timer("neutron.update_bgpvpn")
    def _update_bgpvpn(self, bgpvpn, update_name=False, **kwargs):
        """Update a bgpvpn.

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param update_name: bool, whether or not to modify BGP VPN name
        :param kwargs: dict, PUT /bgpvpn/bgpvpns update options
        :returns: dict, updated bgpvpn
        """
        # Any user-supplied name is replaced with a rally-generated one.
        if update_name or "name" in kwargs:
            kwargs["name"] = self.generate_random_name()
        return self.admin_clients("neutron").update_bgpvpn(
            bgpvpn["bgpvpn"]["id"], {"bgpvpn": kwargs})

    @atomic.action_timer("neutron.create_bgpvpn_network_assoc")
    def _create_bgpvpn_network_assoc(self, bgpvpn, network):
        """Creates a new BGP VPN network association.

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param network: dict, network (inner dict with "id")
        :returns: dict, network_association
        """
        netassoc = {"network_id": network["id"]}
        return self.clients("neutron").create_bgpvpn_network_assoc(
            bgpvpn["bgpvpn"]["id"], {"network_association": netassoc})

    @atomic.action_timer("neutron.delete_bgpvpn_network_assoc")
    def _delete_bgpvpn_network_assoc(self, bgpvpn, net_assoc):
        """Delete the specified BGP VPN network association

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param net_assoc: dict, network association
                          ({"network_association": {...}} wrapper)
        :returns: dict, network_association
        """
        return self.clients("neutron").delete_bgpvpn_network_assoc(
            bgpvpn["bgpvpn"]["id"], net_assoc["network_association"]["id"])

    @atomic.action_timer("neutron.create_bgpvpn_router_assoc")
    def _create_bgpvpn_router_assoc(self, bgpvpn, router):
        """Creates a new BGP VPN router association.

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param router: dict, router (inner dict with "id")
        :returns: dict, router_association
        """
        router_assoc = {"router_id": router["id"]}
        return self.clients("neutron").create_bgpvpn_router_assoc(
            bgpvpn["bgpvpn"]["id"], {"router_association": router_assoc})

    @atomic.action_timer("neutron.delete_bgpvpn_router_assoc")
    def _delete_bgpvpn_router_assoc(self, bgpvpn, router_assoc):
        """Delete the specified BGP VPN router association

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param router_assoc: dict, router association
                             ({"router_association": {...}} wrapper)
        :returns: dict, router_association
        """
        return self.clients("neutron").delete_bgpvpn_router_assoc(
            bgpvpn["bgpvpn"]["id"], router_assoc["router_association"]["id"])

    @atomic.action_timer("neutron.list_bgpvpn_network_assocs")
    def _list_bgpvpn_network_assocs(self, bgpvpn, **kwargs):
        """List network association of bgpvpn

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param kwargs: dict, optional parameters
        :returns: dict, network_association
        """
        return self.clients("neutron").list_bgpvpn_network_assocs(
            bgpvpn["bgpvpn"]["id"], **kwargs)

    @atomic.action_timer("neutron.list_bgpvpn_router_assocs")
    def _list_bgpvpn_router_assocs(self, bgpvpn, **kwargs):
        """List router association of bgpvpn

        :param bgpvpn: dict, bgpvpn ({"bgpvpn": {...}} wrapper)
        :param kwargs: dict, optional parameters
        :returns: dict, router_association
        """
        return self.clients("neutron").list_bgpvpn_router_assocs(
            bgpvpn["bgpvpn"]["id"], **kwargs)
def _create_security_group_rule(self, security_group_id,
**security_group_rule_args):
"""Create Neutron security-group-rule.
:param security_group_id: id of neutron security_group
:param security_group_rule_args: dict, POST
/v2.0/security-group-rules request options
:returns: dict, neutron security-group-rule
"""
return {"security_group_rule": self.neutron.create_security_group_rule(
security_group_id, **security_group_rule_args
)}
def _list_security_group_rules(self, **kwargs):
"""List all security group rules.
:param kwargs: Optional additional arguments for roles list
:return: list of security group rules
"""
result = self.neutron.list_security_group_rules(**kwargs)
return {"security_group_rules": result}
def _show_security_group_rule(self, security_group_rule, **kwargs):
"""Show information of a given security group rule.
:param security_group_rule: id of security group rule
:param kwargs: Optional additional arguments for roles list
:return: details of security group rule
"""
return {"security_group_rule": self.neutron.get_security_group_rule(
security_group_rule, **kwargs)}
    def _delete_security_group_rule(self, security_group_rule):
        """Delete a given security group rule.

        :param security_group_rule: id of the security group rule
        """
        self.neutron.delete_security_group_rule(security_group_rule)
@atomic.action_timer("neutron.delete_trunk")
def _delete_trunk(self, trunk_port):
self.clients("neutron").delete_trunk(trunk_port["port_id"])
@atomic.action_timer("neutron.create_trunk")
def _create_trunk(self, trunk_payload):
trunk_payload["name"] = self.generate_random_name()
return self.clients("neutron").create_trunk({"trunk": trunk_payload})
@atomic.action_timer("neutron.list_trunks")
def _list_trunks(self, **kwargs):
return self.clients("neutron").list_trunks(**kwargs)["trunks"]
@atomic.action_timer("neutron.list_subports_by_trunk")
def _list_subports_by_trunk(self, trunk_id):
return self.clients("neutron").trunk_get_subports(trunk_id)
@atomic.action_timer("neutron._add_subports_to_trunk")
def _add_subports_to_trunk(self, trunk_id, subports):
return self.clients("neutron").trunk_add_subports(
trunk_id, {"sub_ports": subports})
    def _list_ports_by_device_id(self, device_id):
        """List ports attached to the given device.

        :param device_id: id of the device to filter ports by
        :returns: ports matching the device, as returned by the service
        """
        return self.neutron.list_ports(device_id=device_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,727
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/network/test_neutron.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.common import credential
from rally_openstack.common.services.network import neutron
from tests.unit import test
# Import path of the module under test; used to build mock.patch targets.
PATH = "rally_openstack.common.services.network.neutron"
class NeutronServiceTestCase(test.TestCase):
    def setUp(self):
        """Wire a NeutronService to fully mocked clients.

        ``self.nc`` is the mocked neutron client; ``name_generator``
        yields deterministic names "s-1", "s-2", ... so tests can assert
        on generated resource names.
        """
        super(NeutronServiceTestCase, self).setUp()
        self.clients = mock.MagicMock(
            credential=credential.OpenStackCredential(
                auth_url="example.com",
                username="root",
                password="changeme"
            )
        )
        self.nc = self.clients.neutron.return_value
        self.atomic_inst = []
        self.name_generator_count = 0

        def name_generator():
            # deterministic, counter-based names: "s-1", "s-2", ...
            self.name_generator_count += 1
            return f"s-{self.name_generator_count}"

        self.neutron = neutron.NeutronService(
            clients=self.clients,
            name_generator=name_generator,
            atomic_inst=self.atomic_inst
        )
    def test_create_network_topology_without_a_router(self):
        """Without router args only one network + one subnet are created."""
        network = {"id": "net-id", "name": "s-1"}
        subnets = [
            {"id": "subnet1-id", "name": "subnet1-name"},
            {"id": "subnet2-id", "name": "subnet2-name"}
        ]
        self.nc.create_network.return_value = {"network": network.copy()}
        self.nc.create_subnet.side_effect = [{"subnet": s} for s in subnets]

        network_create_args = {}
        subnet_create_args = {}

        topo = self.neutron.create_network_topology(
            network_create_args=network_create_args,
            subnet_create_args=subnet_create_args
        )
        # only the first fixture subnet is consumed; the second side_effect
        # entry stays unused, proving a single create_subnet call was made
        self.assertEqual(
            {
                "network": dict(subnets=[subnets[0]["id"]], **network),
                "subnets": [subnets[0]],
                "routers": []
            },
            topo
        )
        self.nc.create_network.assert_called_once_with(
            {"network": {"name": "s-1"}})
        self.nc.create_subnet.assert_called_once_with(
            {"subnet": {"name": "s-2", "network_id": "net-id",
                        "dns_nameservers": mock.ANY, "ip_version": 4,
                        "cidr": mock.ANY}}
        )
        self.assertFalse(self.nc.create_router.called)
        self.assertFalse(self.nc.add_interface_router.called)
    def test_create_network_topology(self):
        """Dualstack topology: network, router, two subnets wired together."""
        network = {"id": "net-id", "name": "s-1"}
        subnets = [
            {"id": "subnet1-id", "name": "subnet1-name"},
            {"id": "subnet2-id", "name": "subnet2-name"}
        ]
        router = {"id": "router"}
        self.nc.create_network.return_value = {"network": network.copy()}
        self.nc.create_router.return_value = {"router": router.copy()}
        self.nc.create_subnet.side_effect = [{"subnet": s} for s in subnets]

        network_create_args = {}
        subnet_create_args = {}

        topo = self.neutron.create_network_topology(
            network_create_args=network_create_args,
            subnet_create_args=subnet_create_args,
            router_create_args={},
            subnets_count=2,
            subnets_dualstack=True
        )
        self.assertEqual(
            {
                "network": dict(subnets=[subnets[0]["id"], subnets[1]["id"]],
                                **network),
                "subnets": [subnets[0], subnets[1]],
                "routers": [router]
            },
            topo
        )
        self.nc.create_network.assert_called_once_with(
            {"network": {"name": "s-1"}})
        self.nc.create_router.assert_called_once_with(
            {"router": {"name": "s-2"}})
        # names continue from the counter: network got s-1, router s-2, so
        # the two subnets are s-3 and s-4; dualstack alternates ip_version
        self.assertEqual(
            [
                mock.call({"subnet": {
                    "name": f"s-{i}", "network_id": "net-id",
                    "dns_nameservers": mock.ANY,
                    "ip_version": 4 if i % 3 == 0 else 6,
                    "cidr": mock.ANY}})
                for i in range(3, 5)],
            self.nc.create_subnet.call_args_list
        )
        self.assertEqual(
            [
                mock.call(router["id"], {"subnet_id": subnets[0]["id"]}),
                mock.call(router["id"], {"subnet_id": subnets[1]["id"]})
            ],
            self.nc.add_interface_router.call_args_list
        )
    def test_delete_network_topology(self):
        """Teardown removes gateways, ports, listed subnets and routers."""
        topo = {
            "network": {"id": "net-id"},
            "routers": [{"id": "r1"}, {"id": "r2"}, {"id": "r3"}],
            "subnets": [{"id": "s-1"}, {"id": "s-2"}, {"id": "s-3"}]
        }
        self.nc.list_ports.return_value = {
            "ports": [
                {"id": "p1", "device_owner": "1"},
                {"id": "p2", "device_owner": "2"}
            ]
        }
        self.nc.list_subnets.return_value = {
            "subnets": [{"id": "snet-1"}, {"id": "snet-2"}]
        }

        self.neutron.delete_network_topology(topo)

        self.assertEqual(
            [mock.call(r["id"]) for r in topo["routers"]],
            self.nc.remove_gateway_router.call_args_list
        )
        self.nc.list_ports.assert_called_once_with(
            network_id=topo["network"]["id"]
        )
        self.assertEqual(
            # subnets from topo object should be ignored and all subnets
            # should be listed
            [mock.call(s["id"])
             for s in self.nc.list_subnets.return_value["subnets"]],
            self.nc.delete_subnet.call_args_list
        )
        self.nc.delete_network.assert_called_once_with(topo["network"]["id"])
        self.assertEqual(
            [mock.call(r["id"]) for r in topo["routers"]],
            self.nc.delete_router.call_args_list
        )
def test_create_network(self):
net = "foo"
self.nc.create_network.return_value = {"network": net}
self.assertEqual(
net,
self.neutron.create_network(
provider_physical_network="ppn",
**{"router:external": True}
)
)
self.nc.create_network.assert_called_once_with(
{"network": {"name": "s-1", "provider:physical_network": "ppn",
"router:external": True}}
)
def test_get_network(self):
network = "foo"
self.nc.show_network.return_value = {"network": network}
net_id = "net-id"
self.assertEqual(network, self.neutron.get_network(net_id))
self.nc.show_network.assert_called_once_with(net_id)
self.nc.show_network.reset_mock()
fields = ["a", "b"]
self.assertEqual(network,
self.neutron.get_network(net_id, fields=fields))
self.nc.show_network.assert_called_once_with(net_id, fields=fields)
def test_find_network(self):
net1 = {"id": "net-1", "name": "foo"}
net2 = {"id": "net-2", "name": "bar"}
self.nc.list_networks.return_value = {"networks": [net1, net2]}
self.assertEqual(net2, self.neutron.find_network("bar"))
self.assertEqual(net1, self.neutron.find_network("net-1"))
self.assertRaises(exceptions.GetResourceFailure,
self.neutron.find_network, "net-3")
def test_update_network(self):
network = "foo"
self.nc.update_network.return_value = {"network": network}
net_id = "net-id"
self.assertEqual(network, self.neutron.update_network(
net_id, admin_state_up=False))
self.nc.update_network.assert_called_once_with(
net_id, {"network": {"admin_state_up": False}})
self.nc.update_network.reset_mock()
self.assertRaises(TypeError,
self.neutron.update_network, net_id)
self.assertFalse(self.nc.update_network.called)
def test_delete_network(self):
net_id = "net-id"
self.neutron.delete_network(net_id)
self.nc.delete_network.assert_called_once_with(net_id)
def test_list_networks(self):
net1 = {"id": "net-1", "name": "foo"}
net2 = {"id": "net-2", "name": "bar"}
self.nc.list_networks.return_value = {"networks": [net1, net2]}
self.assertEqual([net1, net2], self.neutron.list_networks())
self.nc.list_networks.assert_called_once_with()
    @mock.patch("%s.net_utils.generate_cidr" % PATH)
    def test_create_subnet(self, mock_generate_cidr):
        """create_subnet generates cidr/dns defaults and can wire a router."""
        net_id = "net-id"
        router_id = "router-id"
        mock_generate_cidr.return_value = (6, "generated_cidr")
        subnet = {"id": "subnet-id"}
        self.nc.create_subnet.return_value = {"subnet": subnet}

        # case 1:
        #   - cidr is not specified, so it should be generated
        #   - ip_version equals to 6, so proper dns nameservers should be used
        #   - router_id is specified, so add_interface_router method should be
        #     called
        self.assertEqual(
            subnet,
            self.neutron.create_subnet(network_id=net_id,
                                       router_id=router_id,
                                       ip_version=6)
        )
        self.nc.create_subnet.assert_called_once_with({"subnet": {
            "name": "s-1",
            "network_id": net_id,
            "ip_version": 6,
            "cidr": "generated_cidr",
            "dns_nameservers": self.neutron.IPv6_DEFAULT_DNS_NAMESERVERS
        }})
        mock_generate_cidr.assert_called_once_with(
            ip_version=6,
            start_cidr=None
        )
        self.nc.add_interface_router.assert_called_once_with(
            router_id, {"subnet_id": subnet["id"]}
        )
        mock_generate_cidr.reset_mock()
        self.nc.create_subnet.reset_mock()
        self.nc.add_interface_router.reset_mock()

        # case 2:
        #   - cidr is specified, so it should not be generated
        #   - ip_version equals to 4, so proper dns nameservers should be used
        #   - router_id is not specified, so add_interface_router method
        #     should not be called
        self.assertEqual(
            subnet,
            self.neutron.create_subnet(network_id=net_id,
                                       cidr="some-cidr",
                                       ip_version=4)
        )
        self.nc.create_subnet.assert_called_once_with({"subnet": {
            "name": "s-2",
            "network_id": net_id,
            "ip_version": 4,
            "cidr": "some-cidr",
            "dns_nameservers": self.neutron.IPv4_DEFAULT_DNS_NAMESERVERS
        }})
        self.assertFalse(mock_generate_cidr.called)
        self.assertFalse(self.nc.add_interface_router.called)
        mock_generate_cidr.reset_mock()
        self.nc.create_subnet.reset_mock()
        self.nc.add_interface_router.reset_mock()

        # case 3:
        #   - cidr is specified, so it should not be generated
        #   - dns_nameservers equals to None, so default values should not be
        #     applied
        #   - router_id is specified, so add_interface_router method should
        #     be called
        self.assertEqual(
            subnet,
            self.neutron.create_subnet(network_id=net_id,
                                       router_id=router_id,
                                       cidr="some-cidr",
                                       dns_nameservers=None,
                                       ip_version=4)
        )
        self.nc.create_subnet.assert_called_once_with({"subnet": {
            "name": "s-3",
            "network_id": net_id,
            "ip_version": 4,
            "cidr": "some-cidr",
            "dns_nameservers": None
        }})
        self.assertFalse(mock_generate_cidr.called)
        self.nc.add_interface_router.assert_called_once_with(
            router_id, {"subnet_id": subnet["id"]}
        )
def test_get_subnet(self):
subnet = "foo"
self.nc.show_subnet.return_value = {"subnet": subnet}
subnet_id = "subnet-id"
self.assertEqual(subnet, self.neutron.get_subnet(subnet_id))
self.nc.show_subnet.assert_called_once_with(subnet_id)
def test_update_subnet(self):
subnet = "foo"
self.nc.update_subnet.return_value = {"subnet": subnet}
subnet_id = "subnet-id"
self.assertEqual(subnet, self.neutron.update_subnet(
subnet_id, enable_dhcp=False))
self.nc.update_subnet.assert_called_once_with(
subnet_id, {"subnet": {"enable_dhcp": False}})
self.nc.update_subnet.reset_mock()
self.assertRaises(TypeError,
self.neutron.update_subnet, subnet_id)
self.assertFalse(self.nc.update_subnet.called)
def test_delete_subnet(self):
subnet_id = "subnet-id"
self.neutron.delete_subnet(subnet_id)
self.nc.delete_subnet.assert_called_once_with(subnet_id)
def test_list_subnets(self):
subnet1 = {"id": "subnet-1", "name": "foo"}
subnet2 = {"id": "subnet-2", "name": "bar"}
self.nc.list_subnets.return_value = {"subnets": [subnet1, subnet2]}
self.assertEqual([subnet1, subnet2], self.neutron.list_subnets())
self.nc.list_subnets.assert_called_once_with()
    def test_create_router(self):
        """create_router discovers an external gateway only when asked."""
        net1 = {"id": "net-1", "name": "foo"}
        net2 = {"id": "net-2", "name": "bar"}
        self.nc.list_networks.return_value = {"networks": [net1, net2]}
        router = {"id": "router-id"}
        self.nc.create_router.return_value = {"router": router}

        # case 1: external_gateway_info is specified, list_networks should
        #   not be called
        self.assertEqual(
            router,
            self.neutron.create_router(
                external_gateway_info={"network_id": "net-id"},
                ha=True
            )
        )
        self.nc.create_router.assert_called_once_with({"router": {
            "name": "s-1",
            "external_gateway_info": {"network_id": "net-id"},
            "ha": True
        }})
        self.assertFalse(self.nc.list_networks.called)
        self.nc.create_router.reset_mock()

        # case 2: external_gateway_info is not specified, but
        #   discover_external_gw is False, so list_networks should not be
        #   called as well
        self.assertEqual(
            router,
            self.neutron.create_router(
                discover_external_gw=False,
                ha=True
            )
        )
        self.nc.create_router.assert_called_once_with({"router": {
            "name": "s-2",
            "ha": True
        }})
        self.assertFalse(self.nc.list_networks.called)
        self.nc.create_router.reset_mock()

        # case 3: external_gateway_info is not specified, so list_networks
        #   should be called to discover the external network; the first
        #   listed network becomes the gateway
        self.assertEqual(
            router,
            self.neutron.create_router(ha=True, discover_external_gw=True)
        )
        self.nc.create_router.assert_called_once_with({"router": {
            "name": "s-3",
            "external_gateway_info": {"network_id": net1["id"]},
            "ha": True
        }})
        self.nc.list_networks.assert_called_once_with(
            **{"router:external": True}
        )
def test_get_router(self):
router = "foo"
self.nc.show_router.return_value = {"router": router}
router_id = "router-id"
self.assertEqual(router, self.neutron.get_router(router_id))
self.nc.show_router.assert_called_once_with(router_id)
self.nc.show_router.reset_mock()
fields = ["a", "b"]
self.assertEqual(router,
self.neutron.get_router(router_id, fields=fields))
self.nc.show_router.assert_called_once_with(router_id, fields=fields)
    def test_add_interface_to_router(self):
        """Exactly one of subnet_id/port_id must be given."""
        router_id = "router-id"
        subnet_id = "subnet-id"
        port_id = "port-id"
        # by subnet
        self.neutron.add_interface_to_router(router_id, subnet_id=subnet_id)
        self.nc.add_interface_router.assert_called_once_with(
            router_id, {"subnet_id": subnet_id})
        self.nc.add_interface_router.reset_mock()
        # by port
        self.neutron.add_interface_to_router(router_id, port_id=port_id)
        self.nc.add_interface_router.assert_called_once_with(
            router_id, {"port_id": port_id})
        self.nc.add_interface_router.reset_mock()
        # neither given -> TypeError, no API call
        self.assertRaises(TypeError,
                          self.neutron.add_interface_to_router, router_id)
        self.assertFalse(self.nc.add_interface_router.called)
        # both given -> TypeError, no API call
        self.assertRaises(TypeError,
                          self.neutron.add_interface_to_router, router_id,
                          port_id=port_id, subnet_id=subnet_id)
        self.assertFalse(self.nc.add_interface_router.called)
    def test_remove_interface_from_router(self):
        """Exactly one of subnet_id/port_id must be given."""
        router_id = "router-id"
        subnet_id = "subnet-id"
        port_id = "port-id"
        # case 1: use subnet-id
        self.neutron.remove_interface_from_router(
            router_id, subnet_id=subnet_id)
        self.nc.remove_interface_router.assert_called_once_with(
            router_id, {"subnet_id": subnet_id})
        self.nc.remove_interface_router.reset_mock()
        # case 2: use port-id
        self.neutron.remove_interface_from_router(router_id, port_id=port_id)
        self.nc.remove_interface_router.assert_called_once_with(
            router_id, {"port_id": port_id})
        self.nc.remove_interface_router.reset_mock()
        # case 3: no port and subnet are specified
        self.assertRaises(TypeError,
                          self.neutron.remove_interface_from_router,
                          router_id)
        self.assertFalse(self.nc.remove_interface_router.called)
        # case 4: both port and subnet are specified
        self.assertRaises(TypeError,
                          self.neutron.remove_interface_from_router,
                          router_id,
                          port_id=port_id, subnet_id=subnet_id)
        self.assertFalse(self.nc.remove_interface_router.called)
def test_test_remove_interface_from_router_silent_error(self):
from neutronclient.common import exceptions as neutron_exceptions
router_id = "router-id"
subnet_id = "subnet-id"
for exc in (neutron_exceptions.BadRequest,
neutron_exceptions.NotFound):
self.nc.remove_interface_router.side_effect = exc
self.neutron.remove_interface_from_router(
router_id, subnet_id=subnet_id)
self.nc.remove_interface_router.assert_called_once_with(
router_id, {"subnet_id": subnet_id})
self.nc.remove_interface_router.reset_mock()
    def test_add_gateway_to_router(self):
        """add_gateway_to_router forwards optional gateway attributes.

        NOTE(review): list_extensions reports "ext-gw-mode" — presumably
        enable_snat forwarding is gated on that extension; confirm in the
        service implementation.
        """
        router_id = "r-id"
        net_id = "net-id"
        external_fixed_ips = "ex-net-obj"
        self.nc.list_extensions.return_value = {
            "extensions": [{"alias": "ext-gw-mode"}]
        }

        # case 1: all optional attributes provided
        self.neutron.add_gateway_to_router(
            router_id,
            network_id=net_id,
            external_fixed_ips=external_fixed_ips,
            enable_snat=True
        )
        self.nc.add_gateway_router.assert_called_once_with(
            router_id, {"network_id": net_id,
                        "enable_snat": True,
                        "external_fixed_ips": external_fixed_ips})
        self.nc.add_gateway_router.reset_mock()

        # case 2: only the network id
        self.neutron.add_gateway_to_router(router_id, network_id=net_id)
        self.nc.add_gateway_router.assert_called_once_with(
            router_id, {"network_id": net_id})
def test_remove_gateway_from_router(self):
router_id = "r-id"
self.neutron.remove_gateway_from_router(router_id)
self.nc.remove_gateway_router.assert_called_once_with(router_id)
def test_update_router(self):
router = "foo"
self.nc.update_router.return_value = {"router": router}
router_id = "subnet-id"
self.assertEqual(router, self.neutron.update_router(
router_id, admin_state_up=False))
self.nc.update_router.assert_called_once_with(
router_id, {"router": {"admin_state_up": False}})
self.nc.update_router.reset_mock()
self.assertRaises(TypeError,
self.neutron.update_router, router_id)
self.assertFalse(self.nc.update_router.called)
def test_delete_router(self):
router_id = "r-id"
self.neutron.delete_router(router_id)
self.nc.delete_router.assert_called_once_with(router_id)
    def test_list_routers(self):
        """list_routers supports native filters and post-filter by subnet."""
        router1 = {
            "id": "router-1",
            "name": "r1",
            "external_gateway_info": None
        }
        router2 = {
            "id": "router-2",
            "name": "r2",
            "external_gateway_info": {"external_fixed_ips": []}
        }
        router3 = {
            "id": "router-3",
            "name": "r3",
            "external_gateway_info": {
                "external_fixed_ips": [{"subnet_id": "s1"}]
            }
        }
        router4 = {
            "id": "router-4",
            "name": "r4",
            "external_gateway_info": {
                "external_fixed_ips": [{"subnet_id": "s1"},
                                       {"subnet_id": "s2"}]
            }
        }
        router5 = {
            "id": "router-5",
            "name": "r5",
            "external_gateway_info": {
                "external_fixed_ips": [{"subnet_id": "s2"}]
            }
        }
        self.nc.list_routers.return_value = {"routers": [
            router1, router2, router3, router4, router5]}

        # case 1: use native neutron api filters
        self.assertEqual(
            [router1, router2, router3, router4, router5],
            self.neutron.list_routers(admin_state_up=True)
        )
        self.nc.list_routers.assert_called_once_with(admin_state_up=True)
        self.nc.list_routers.reset_mock()

        # case 2: use additional post api filtering by subnet
        self.assertEqual(
            [router4, router5],
            self.neutron.list_routers(subnet_ids=["s2"])
        )
        self.nc.list_routers.assert_called_once_with()
def test_create_port(self):
net_id = "net-id"
port = "foo"
self.nc.create_port.return_value = {"port": port}
self.assertEqual(port, self.neutron.create_port(network_id=net_id))
self.nc.create_port.assert_called_once_with(
{"port": {"name": "s-1", "network_id": net_id}}
)
def test_get_port(self):
port = "foo"
self.nc.show_port.return_value = {"port": port}
port_id = "net-id"
self.assertEqual(port, self.neutron.get_port(port_id))
self.nc.show_port.assert_called_once_with(port_id)
self.nc.show_port.reset_mock()
fields = ["a", "b"]
self.assertEqual(port,
self.neutron.get_port(port_id, fields=fields))
self.nc.show_port.assert_called_once_with(port_id, fields=fields)
def test_update_port(self):
port = "foo"
self.nc.update_port.return_value = {"port": port}
port_id = "net-id"
self.assertEqual(port, self.neutron.update_port(
port_id, admin_state_up=False))
self.nc.update_port.assert_called_once_with(
port_id, {"port": {"admin_state_up": False}})
self.nc.update_port.reset_mock()
self.assertRaises(TypeError, self.neutron.update_port, port_id)
self.assertFalse(self.nc.update_port.called)
    def test_delete_port(self):
        """delete_port handles plain ids and router-owned port dicts."""
        # case 1: port argument is a string with port ID
        port = "port-id"
        self.neutron.delete_port(port)
        self.nc.delete_port.assert_called_once_with(port)
        self.assertFalse(self.nc.remove_gateway_router.called)
        self.assertFalse(self.nc.remove_interface_router.called)
        self.nc.delete_port.reset_mock()
        # case 2: port argument is a dict with an id and not-special
        #   device_owner
        port = {"id": "port-id", "device_owner": "someone",
                "device_id": "device-id"}
        self.neutron.delete_port(port)
        self.nc.delete_port.assert_called_once_with(port["id"])
        self.assertFalse(self.nc.remove_interface_router.called)
        self.nc.delete_port.reset_mock()
        # case 3: port argument is a dict with an id and owner is a router
        #   interface -> detach from the router instead of deleting
        port = {"id": "port-id",
                "device_id": "device-id",
                "device_owner": "network:router_interface_distributed"}
        self.neutron.delete_port(port)
        self.assertFalse(self.nc.delete_port.called)
        self.assertFalse(self.nc.remove_gateway_router.called)
        self.nc.remove_interface_router.assert_called_once_with(
            port["device_id"], {"port_id": port["id"]}
        )
        self.nc.delete_port.reset_mock()
        self.nc.remove_interface_router.reset_mock()
        # case 4: port argument is a dict with an id and owner is a router
        #   gateway -> remove the gateway and the interface
        port = {"id": "port-id",
                "device_id": "device-id",
                "device_owner": "network:router_gateway"}
        self.neutron.delete_port(port)
        self.assertFalse(self.nc.delete_port.called)
        self.nc.remove_gateway_router.assert_called_once_with(
            port["device_id"]
        )
        self.nc.remove_interface_router.assert_called_once_with(
            port["device_id"], {"port_id": port["id"]}
        )
    def test_delete_port_silently(self):
        """delete_port swallows PortNotFoundClient from the client."""
        from neutronclient.common import exceptions as neutron_exceptions

        self.nc.delete_port.side_effect = neutron_exceptions.PortNotFoundClient
        port = "port-id"
        # the not-found error must be silently ignored
        self.neutron.delete_port(port)
        self.nc.delete_port.assert_called_once_with(port)
        self.assertFalse(self.nc.remove_gateway_router.called)
        self.assertFalse(self.nc.remove_interface_router.called)
def test_list_ports(self):
port1 = {"id": "port-1", "name": "foo"}
port2 = {"id": "port-2", "name": "bar"}
self.nc.list_ports.return_value = {"ports": [port1, port2]}
self.assertEqual([port1, port2], self.neutron.list_ports())
self.nc.list_ports.assert_called_once_with()
    def test_create_floatingip(self):
        """create_floatingip resolves the external network when needed."""
        floatingip = "foo"
        self.nc.create_floatingip.return_value = {"floatingip": floatingip}
        networks = [
            {"id": "net1-id", "name": "net1"},
            {"id": "net2-id", "name": "net2", "router:external": True},
            {"id": "net3-id", "name": "net3", "router:external": False}
        ]
        self.nc.list_networks.return_value = {"networks": networks}
        # case 1: floating_network is a dict with network id
        floating_network = {"id": "net-id"}
        self.assertEqual(
            floatingip,
            self.neutron.create_floatingip(floating_network=floating_network)
        )
        self.nc.create_floatingip.assert_called_once_with(
            {
                "floatingip": {"description": "s-1",
                               "floating_network_id": floating_network["id"]}
            }
        )
        self.assertFalse(self.nc.list_networks.called)
        self.nc.create_floatingip.reset_mock()
        # case 2: floating_network is an ID
        floating_network = "net2-id"
        self.assertEqual(
            floatingip,
            self.neutron.create_floatingip(floating_network=floating_network)
        )
        self.nc.create_floatingip.assert_called_once_with(
            {
                "floatingip": {"description": "s-2",
                               "floating_network_id": floating_network}
            }
        )
        self.nc.list_networks.assert_called_once_with()
        self.nc.create_floatingip.reset_mock()
        self.nc.list_networks.reset_mock()
        # case 3: floating_network is an ID
        # NOTE(review): identical to case 2 — presumably meant to use the
        # network *name* "net2"; confirm and adjust the fixture.
        floating_network = "net2-id"
        self.assertEqual(
            floatingip,
            self.neutron.create_floatingip(floating_network=floating_network)
        )
        self.nc.create_floatingip.assert_called_once_with(
            {
                "floatingip": {"description": "s-3",
                               "floating_network_id": floating_network}
            }
        )
        self.nc.list_networks.assert_called_once_with()
        self.nc.create_floatingip.reset_mock()
        self.nc.list_networks.reset_mock()
        # case 4: floating_network is a name of not external network
        floating_network = "net3"
        self.assertRaises(
            exceptions.NotFoundException,
            self.neutron.create_floatingip, floating_network=floating_network
        )
        self.assertFalse(self.nc.create_floatingip.called)
        self.nc.list_networks.assert_called_once_with()
        self.nc.create_floatingip.reset_mock()
        self.nc.list_networks.reset_mock()
        # case 5: floating_network is not specified
        self.assertEqual(
            floatingip,
            self.neutron.create_floatingip()
        )
        self.nc.create_floatingip.assert_called_once_with(
            {
                "floatingip": {"description": "s-4",
                               "floating_network_id": networks[0]["id"]}
            }
        )
        self.nc.list_networks.assert_called_once_with(
            **{"router:external": True})
        self.nc.create_floatingip.reset_mock()
        self.nc.list_networks.reset_mock()
    def test_create_floatingip_pre_newton(self):
        """Pre-Newton neutron: no description and no generated name."""
        self.clients.credential.api_info["neutron"] = {"pre_newton": True}
        floatingip = "foo"
        self.nc.create_floatingip.return_value = {"floatingip": floatingip}
        floating_network = {"id": "net-id"}
        self.assertEqual(
            floatingip,
            self.neutron.create_floatingip(floating_network=floating_network)
        )
        # no "description" key in the request body for pre-Newton clouds
        self.nc.create_floatingip.assert_called_once_with(
            {
                "floatingip": {"floating_network_id": floating_network["id"]}
            }
        )
        # generate random name should not be called
        self.assertEqual(0, self.name_generator_count)
    @mock.patch("%s.LOG.info" % PATH)
    def test_create_floatingip_failure(self, mock_log_info):
        """BadRequest is re-raised; a hint is logged only for 'description'."""
        from neutronclient.common import exceptions as neutron_exceptions

        # case 1: an error which we should not handle -> no log message
        self.nc.create_floatingip.side_effect = neutron_exceptions.BadRequest(
            "oops"
        )
        self.assertRaises(
            neutron_exceptions.BadRequest,
            self.neutron.create_floatingip, floating_network={"id": "net-id"}
        )
        self.assertFalse(mock_log_info.called)

        # case 2: exception that we should handle -> still raised, but a
        #   hint about the unsupported 'description' attribute is logged
        self.nc.create_floatingip.side_effect = neutron_exceptions.BadRequest(
            "Unrecognized attribute: 'description'"
        )
        self.assertRaises(
            neutron_exceptions.BadRequest,
            self.neutron.create_floatingip, floating_network={"id": "net-id"}
        )
        self.assertTrue(mock_log_info.called)
def test_get_floatingip(self):
floatingip = "foo"
self.nc.show_floatingip.return_value = {"floatingip": floatingip}
floatingip_id = "fip-id"
self.assertEqual(floatingip,
self.neutron.get_floatingip(floatingip_id))
self.nc.show_floatingip.assert_called_once_with(floatingip_id)
self.nc.show_floatingip.reset_mock()
fields = ["a", "b"]
self.assertEqual(
floatingip,
self.neutron.get_floatingip(floatingip_id, fields=fields)
)
self.nc.show_floatingip.assert_called_once_with(floatingip_id,
fields=fields)
def test_update_floatingip(self):
floatingip = "foo"
self.nc.update_floatingip.return_value = {"floatingip": floatingip}
floatingip_id = "fip-id"
self.assertEqual(floatingip, self.neutron.update_floatingip(
floatingip_id, port_id="port-id"))
self.nc.update_floatingip.assert_called_once_with(
floatingip_id, {"floatingip": {"port_id": "port-id"}})
self.nc.update_floatingip.reset_mock()
self.assertRaises(TypeError,
self.neutron.update_floatingip, floatingip_id)
self.assertFalse(self.nc.update_floatingip.called)
    def test_associate_floatingip(self):
        """associate_floatingip discovers the port and/or fip as needed."""
        floatingip_id = "fip-id"
        device_id = "device-id"
        floating_ip_address = "floating_ip_address"
        floatingip = "foo"
        self.nc.update_floatingip.return_value = {"floatingip": floatingip}
        port_id = "port-id"
        self.nc.list_ports.return_value = {
            "ports": [{"id": port_id, "device_id": device_id}]
        }
        self.nc.list_floatingips.return_value = {
            "floatingips": [{"id": floatingip_id}]
        }
        # case 1:
        #   - port_id is None, so it should be discovered using device_id
        #   - floatingip_id is not None, so nothing should be specified here
        self.assertEqual(
            floatingip,
            self.neutron.associate_floatingip(
                device_id=device_id, floatingip_id=floatingip_id))
        self.nc.update_floatingip.assert_called_once_with(
            floatingip_id, {"floatingip": {"port_id": port_id}})
        self.nc.list_ports.assert_called_once_with(device_id=device_id)
        self.assertFalse(self.nc.list_floatingips.called)
        self.nc.update_floatingip.reset_mock()
        self.nc.list_ports.reset_mock()
        # case 2:
        #   - port_id is not None, so not discovery should be performed
        #   - floatingip_id is None, so it should be discovered
        self.assertEqual(
            floatingip,
            self.neutron.associate_floatingip(
                port_id=port_id, floating_ip_address=floating_ip_address,
                fixed_ip_address="fixed_ip_addr"
            ))
        self.nc.update_floatingip.assert_called_once_with(
            floatingip_id,
            {"floatingip": {"port_id": port_id,
                            "fixed_ip_address": "fixed_ip_addr"}})
        self.assertFalse(self.nc.list_ports.called)
        self.nc.list_floatingips.assert_called_once_with(
            floating_ip_address=floating_ip_address
        )
        self.nc.update_floatingip.reset_mock()
        self.nc.list_ports.reset_mock()
        self.nc.list_floatingips.reset_mock()
        # case 3:
        #   - port_id is not None, so not discovery should be performed
        #   - floatingip_id is None, so it should be discovered, but error
        #     happens
        self.nc.list_floatingips.return_value = {"floatingips": []}
        self.assertRaises(
            exceptions.GetResourceFailure,
            self.neutron.associate_floatingip,
            port_id=port_id, floating_ip_address=floating_ip_address
        )
        self.assertFalse(self.nc.update_floatingip.called)
        self.assertFalse(self.nc.list_ports.called)
        self.nc.list_floatingips.assert_called_once_with(
            floating_ip_address=floating_ip_address
        )
        self.nc.update_floatingip.reset_mock()
        self.nc.list_floatingips.reset_mock()
        # case 4:
        #   - port_id is None, so discovery should be performed, but error
        #     happens
        #   - floatingip_id is None, so discovery should not be performed
        #     since port discovery fails first
        self.nc.list_floatingips.return_value = {"floatingips": []}
        self.nc.list_ports.return_value = {"ports": []}
        self.assertRaises(
            exceptions.GetResourceFailure,
            self.neutron.associate_floatingip,
            device_id=device_id, floating_ip_address=floating_ip_address
        )
        self.nc.list_ports.assert_called_once_with(device_id=device_id)
        self.assertFalse(self.nc.update_floatingip.called)
        self.assertFalse(self.nc.list_floatingips.called)
def test_associate_floatingip_typeerror(self):
# no device_id and port_id
self.assertRaises(TypeError, self.neutron.associate_floatingip)
# both args are specified
self.assertRaises(TypeError, self.neutron.associate_floatingip,
device_id="d-id", port_id="p-id")
# no floating_ip_address and floating_ip_id
self.assertRaises(TypeError, self.neutron.associate_floatingip,
port_id="p-id")
# both args are specified
self.assertRaises(TypeError, self.neutron.associate_floatingip,
port_id="p-id",
floating_ip_address="fip", floating_ip_id="fip_id")
    def test_disassociate_floatingip(self):
        """dissociate_floatingip clears port_id, discovering the FIP id
        from the floating IP address when only the address is given.
        """
        floatingip_id = "fip-id"
        floating_ip_address = "floating_ip_address"
        floatingip = "foo"
        self.nc.update_floatingip.return_value = {"floatingip": floatingip}
        self.nc.list_floatingips.return_value = {
            "floatingips": [{"id": floatingip_id}]
        }
        # case 1: floatingip_id is specified
        self.assertEqual(
            floatingip,
            self.neutron.dissociate_floatingip(floatingip_id=floatingip_id))
        self.nc.update_floatingip.assert_called_once_with(
            floatingip_id, {"floatingip": {"port_id": None}})
        # id was given directly, so no lookup should have happened
        self.assertFalse(self.nc.list_floatingips.called)
        self.nc.update_floatingip.reset_mock()
        # case 2: floating_ip_address is specified
        self.assertEqual(
            floatingip,
            self.neutron.dissociate_floatingip(
                floating_ip_address=floating_ip_address
            ))
        self.nc.update_floatingip.assert_called_once_with(
            floatingip_id, {"floatingip": {"port_id": None}})
        # the id must have been discovered via list_floatingips
        self.nc.list_floatingips.assert_called_once_with(
            floating_ip_address=floating_ip_address
        )
        self.nc.update_floatingip.reset_mock()
        self.nc.list_floatingips.reset_mock()
        # case 3: floating_ip_address is specified but failing to
        # find floatingip by it
        self.nc.list_floatingips.return_value = {"floatingips": []}
        self.assertRaises(
            exceptions.GetResourceFailure,
            self.neutron.dissociate_floatingip,
            floating_ip_address=floating_ip_address
        )
        # discovery failed, so no update should have been attempted
        self.assertFalse(self.nc.update_floatingip.called)
        self.nc.list_floatingips.assert_called_once_with(
            floating_ip_address=floating_ip_address
        )
def test_disassociate_floatingip_typeerror(self):
# no floating_ip_address and floating_ip_id
self.assertRaises(TypeError, self.neutron.dissociate_floatingip)
# both args are specified
self.assertRaises(TypeError, self.neutron.dissociate_floatingip,
floating_ip_address="fip", floating_ip_id="fip_id")
def delete_floatingip(self):
floatingip_id = "fip-id"
self.neutron.delete_floatingip(floatingip_id)
self.nc.delete_floatingip.assert_called_once_with(floatingip_id)
def test_list_floatingips(self):
floatingip_1 = {"id": "fip-1", "name": "foo"}
floatingip_2 = {"id": "fip-2", "name": "bar"}
self.nc.list_floatingips.return_value = {
"floatingips": [floatingip_1, floatingip_2]
}
self.assertEqual(
[floatingip_1, floatingip_2],
self.neutron.list_floatingips(port_id="port-id")
)
self.nc.list_floatingips.assert_called_once_with(port_id="port-id")
def test_create_security_group(self):
security_group = "foo"
self.nc.create_security_group.return_value = {
"security_group": security_group}
self.assertEqual(
security_group, self.neutron.create_security_group(stateful=True)
)
self.nc.create_security_group.assert_called_once_with(
{"security_group": {"name": "s-1", "stateful": True}}
)
def test_get_security_group(self):
security_group = "foo"
self.nc.show_security_group.return_value = {
"security_group": security_group}
security_group_id = "security-group-id"
self.assertEqual(security_group,
self.neutron.get_security_group(security_group_id))
self.nc.show_security_group.assert_called_once_with(security_group_id)
self.nc.show_security_group.reset_mock()
fields = ["a", "b"]
self.assertEqual(
security_group,
self.neutron.get_security_group(security_group_id, fields=fields))
self.nc.show_security_group.assert_called_once_with(
security_group_id, fields=fields)
def test_update_update_security_group(self):
security_group = "foo"
self.nc.update_security_group.return_value = {
"security_group": security_group}
security_group_id = "security-group-id"
self.assertEqual(
security_group,
self.neutron.update_security_group(
security_group_id, stateful=False))
self.nc.update_security_group.assert_called_once_with(
security_group_id, {"security_group": {"stateful": False}})
self.nc.update_security_group.reset_mock()
self.assertRaises(
TypeError,
self.neutron.update_security_group, security_group_id)
self.assertFalse(self.nc.update_security_group.called)
def test_delete_security_group(self):
security_group_id = "security-group-id"
self.neutron.delete_security_group(security_group_id)
self.nc.delete_security_group.assert_called_once_with(
security_group_id)
def test_list_security_groups(self):
sg1 = {"id": "sg-1", "name": "foo"}
sg2 = {"id": "sg-2", "name": "bar"}
self.nc.list_security_groups.return_value = {
"security_groups": [sg1, sg2]
}
self.assertEqual([sg1, sg2], self.neutron.list_security_groups())
self.nc.list_security_groups.assert_called_once_with()
def test_create_security_group_rule(self):
security_group_rule = "foo"
self.nc.create_security_group_rule.return_value = {
"security_group_rule": security_group_rule}
self.assertEqual(
security_group_rule,
self.neutron.create_security_group_rule(
security_group_id="sg1", )
)
self.nc.create_security_group_rule.assert_called_once_with(
{"security_group_rule": {
"security_group_id": "sg1", "direction": "ingress",
"protocol": "tcp"
}}
)
def test_get_security_group_rule(self):
security_group_rule = "foo"
self.nc.show_security_group_rule.return_value = {
"security_group_rule": security_group_rule}
security_group_rule_id = "security-group-id"
self.assertEqual(
security_group_rule,
self.neutron.get_security_group_rule(security_group_rule_id))
self.nc.show_security_group_rule.assert_called_once_with(
security_group_rule_id)
self.nc.show_security_group_rule.reset_mock()
fields = ["a", "b"]
self.assertEqual(
security_group_rule,
self.neutron.get_security_group_rule(
security_group_rule_id, fields=fields))
self.nc.show_security_group_rule.assert_called_once_with(
security_group_rule_id, fields=fields)
def test_delete_security_group_rule(self):
security_group_rule_id = "security-group-rule-id"
self.neutron.delete_security_group_rule(security_group_rule_id)
self.nc.delete_security_group_rule.assert_called_once_with(
security_group_rule_id)
def test_list_security_groups_rule(self):
sgr1 = {"id": "sg-1", "name": "foo"}
sgr2 = {"id": "sg-2", "name": "bar"}
self.nc.list_security_group_rules.return_value = {
"security_group_rules": [sgr1, sgr2]
}
self.assertEqual([sgr1, sgr2],
self.neutron.list_security_group_rules())
self.nc.list_security_group_rules.assert_called_once_with()
def test_list_agents(self):
agent1 = {"id": "agent-1", "name": "foo"}
agent2 = {"id": "agent-2", "name": "bar"}
self.nc.list_agents.return_value = {"agents": [agent1, agent2]}
self.assertEqual([agent1, agent2], self.neutron.list_agents())
self.nc.list_agents.assert_called_once_with()
def test_list_extensions(self):
ext1 = {"alias": "foo"}
ext2 = {"alias": "bar"}
self.nc.list_extensions.return_value = {"extensions": [ext1, ext2]}
self.assertEqual([ext1, ext2], self.neutron.list_extensions())
self.nc.list_extensions.assert_called_once_with()
def test_cached_supported_extensions(self):
ext1 = {"alias": "foo"}
ext2 = {"alias": "bar"}
self.nc.list_extensions.return_value = {"extensions": [ext1, ext2]}
self.assertEqual([ext1, ext2],
self.neutron.cached_supported_extensions)
self.nc.list_extensions.assert_called_once_with()
self.nc.list_extensions.reset_mock()
# another try
self.assertEqual([ext1, ext2],
self.neutron.cached_supported_extensions)
self.assertFalse(self.nc.list_extensions.called)
def test_supports_extension(self):
ext1 = {"alias": "foo"}
ext2 = {"alias": "bar"}
self.nc.list_extensions.return_value = {"extensions": [ext1, ext2]}
self.assertTrue(self.neutron.supports_extension("foo"))
self.assertTrue(self.neutron.supports_extension("bar"))
self.assertFalse(self.neutron.supports_extension("xxx", silent=True))
self.assertRaises(exceptions.NotFoundException,
self.neutron.supports_extension, "xxx")
# this should be called once
self.nc.list_extensions.assert_called_once_with()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,728
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/verification/tempest/test_context.py
|
# Copyright 2017: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
import ddt
import requests
from rally.common import cfg
from rally import exceptions
from rally_openstack.verification.tempest import config
from rally_openstack.verification.tempest import context
from tests.unit import fakes
from tests.unit import test
CONF = cfg.CONF
# Credential payload used to build the FakeCredential in setUp().
CRED = {
    "username": "admin",
    "tenant_name": "admin",
    "password": "admin-12345",
    "auth_url": "http://test:5000/v2.0/",
    "permission": "admin",
    "region_name": "test",
    "https_insecure": False,
    "https_cacert": "/path/to/cacert/file",
    "user_domain_name": "admin",
    "project_domain_name": "admin"
}
# Shortcuts for the long module paths used as mock.patch() targets below.
NET_PATH = "rally_openstack.common.services.network"
PATH = "rally_openstack.verification.tempest.context"
@ddt.ddt
class TempestContextTestCase(test.TestCase):
    """Unit tests for TempestContext: resource discovery, creation,
    configuration and cleanup performed around a Tempest verifier run.
    """
    def setUp(self):
        super(TempestContextTestCase, self).setUp()
        # pretend every filesystem path exists unless a test overrides it
        self.mock_isfile = mock.patch("os.path.isfile",
                                      return_value=True).start()
        self.cred = fakes.FakeCredential(**CRED)
        p_cred = mock.patch(PATH + ".credential.OpenStackCredential",
                            return_value=self.cred)
        p_cred.start()
        self.addCleanup(p_cred.stop)
        self.env = mock.Mock(data={"platforms": {"openstack": {
            "platform_data": {"admin": {}}}}}
        )
        # NOTE(review): this local deliberately shadows the module-level
        # ``cfg`` import — it is the context config dict, not oslo.config.
        cfg = {"verifier": mock.Mock(env=self.env),
               "verification": {"uuid": "uuid"}}
        cfg["verifier"].manager.home_dir = "/p/a/t/h"
        cfg["verifier"].manager.configfile = "/fake/path/to/config"
        self.context = context.TempestContext(cfg)
        self.context.conf.add_section("compute")
        self.context.conf.add_section("orchestration")
        self.context.conf.add_section("scenario")
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open(), create=True)
    def test__download_image_from_glance(self, mock_open):
        self.mock_isfile.return_value = False
        img_path = os.path.join(self.context.data_dir, "foo")
        img = mock.MagicMock()
        glanceclient = self.context.clients.glance()
        glanceclient.images.data.return_value = "data"
        self.context._download_image_from_source(img_path, img)
        mock_open.assert_called_once_with(img_path, "wb")
        glanceclient.images.data.assert_called_once_with(img.id)
        # the "data" string is iterated chunk-by-chunk, hence per-char writes
        mock_open().write.assert_has_calls([mock.call("d"),
                                            mock.call("a"),
                                            mock.call("t"),
                                            mock.call("a")])
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    @mock.patch("requests.get", return_value=mock.MagicMock(status_code=200))
    def test__download_image_from_url_success(self, mock_get, mock_open):
        self.mock_isfile.return_value = False
        img_path = os.path.join(self.context.data_dir, "foo")
        mock_get.return_value.iter_content.return_value = "data"
        self.context._download_image_from_source(img_path)
        # with no image argument the configured img_url is fetched instead
        mock_get.assert_called_once_with(CONF.openstack.img_url, stream=True)
        mock_open.assert_called_once_with(img_path, "wb")
        mock_open().write.assert_has_calls([mock.call("d"),
                                            mock.call("a"),
                                            mock.call("t"),
                                            mock.call("a")])
    @mock.patch("requests.get")
    @ddt.data(404, 500)
    def test__download_image_from_url_failure(self, status_code, mock_get):
        self.mock_isfile.return_value = False
        mock_get.return_value = mock.MagicMock(status_code=status_code)
        self.assertRaises(exceptions.RallyException,
                          self.context._download_image_from_source,
                          os.path.join(self.context.data_dir, "foo"))
    @mock.patch("requests.get", side_effect=requests.ConnectionError())
    def test__download_image_from_url_connection_error(
            self, mock_requests_get):
        self.mock_isfile.return_value = False
        self.assertRaises(exceptions.RallyException,
                          self.context._download_image_from_source,
                          os.path.join(self.context.data_dir, "foo"))
    @mock.patch("rally_openstack.common.wrappers."
                "network.NeutronWrapper.create_network")
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    def test_options_configured_manually(
            self, mock_open, mock_neutron_wrapper_create_network):
        # when every option is pre-set, no resources should be created
        self.context.available_services = ["glance", "heat", "nova", "neutron"]
        self.context.conf.set("compute", "image_ref", "id1")
        self.context.conf.set("compute", "image_ref_alt", "id2")
        self.context.conf.set("compute", "flavor_ref", "id3")
        self.context.conf.set("compute", "flavor_ref_alt", "id4")
        self.context.conf.set("compute", "fixed_network_name", "name1")
        self.context.conf.set("orchestration", "instance_type", "id5")
        self.context.conf.set("scenario", "img_file", "id6")
        self.context.__enter__()
        glanceclient = self.context.clients.glance()
        novaclient = self.context.clients.nova()
        self.assertEqual(0, glanceclient.images.create.call_count)
        self.assertEqual(0, novaclient.flavors.create.call_count)
        self.assertEqual(0, mock_neutron_wrapper_create_network.call_count)
    def test__create_tempest_roles(self):
        role1 = CONF.openstack.swift_operator_role
        role2 = CONF.openstack.swift_reseller_admin_role
        role3 = CONF.openstack.heat_stack_owner_role
        role4 = CONF.openstack.heat_stack_user_role
        client = self.context.clients.verified_keystone()
        # role1/role2 already exist, so only role3/role4 must be created
        client.roles.list.return_value = [fakes.FakeRole(name=role1),
                                          fakes.FakeRole(name=role2)]
        client.roles.create.side_effect = [fakes.FakeFlavor(name=role3),
                                           fakes.FakeFlavor(name=role4)]
        self.context._create_tempest_roles()
        self.assertEqual(2, client.roles.create.call_count)
        created_roles = [role.name for role in self.context._created_roles]
        self.assertIn(role3, created_roles)
        self.assertIn(role4, created_roles)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__discover_image(self, mock_image):
        client = mock_image.return_value
        client.list_images.return_value = [fakes.FakeImage(name="Foo"),
                                           fakes.FakeImage(name="CirrOS")]
        # NOTE(review): this local shadows the imported ``image`` module
        image = self.context._discover_image()
        self.assertEqual("CirrOS", image.name)
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open(), create=True)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    @mock.patch("os.path.isfile", return_value=False)
    def test__download_image(self, mock_isfile, mock_image, mock_open):
        img_1 = mock.MagicMock()
        img_1.name = "Foo"
        img_2 = mock.MagicMock()
        img_2.name = "CirrOS"
        glanceclient = self.context.clients.glance()
        glanceclient.images.data.return_value = "data"
        mock_image.return_value.list_images.return_value = [img_1, img_2]
        self.context._download_image()
        img_path = os.path.join(self.context.data_dir, self.context.image_name)
        mock_image.return_value.list_images.assert_called_once_with(
            status="active", visibility="public")
        glanceclient.images.data.assert_called_once_with(img_2.id)
        mock_open.assert_called_once_with(img_path, "wb")
        mock_open().write.assert_has_calls([mock.call("d"),
                                            mock.call("a"),
                                            mock.call("t"),
                                            mock.call("a")])
    # We can choose any option to test the '_configure_option' method. So let's
    # configure the 'flavor_ref' option.
    def test__configure_option(self):
        helper_method = mock.MagicMock()
        helper_method.side_effect = [fakes.FakeFlavor(id="id1")]
        self.context.conf.set("compute", "flavor_ref", "")
        self.context._configure_option("compute", "flavor_ref",
                                       helper_method=helper_method, flv_ram=64,
                                       flv_disk=5)
        self.assertEqual(1, helper_method.call_count)
        result = self.context.conf.get("compute", "flavor_ref")
        self.assertEqual("id1", result)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__discover_or_create_image_when_image_exists(self, mock_image):
        client = mock_image.return_value
        client.list_images.return_value = [fakes.FakeImage(name="CirrOS")]
        image = self.context._discover_or_create_image()
        self.assertEqual("CirrOS", image.name)
        self.assertEqual(0, client.create_image.call_count)
        self.assertEqual(0, len(self.context._created_images))
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__discover_or_create_image(self, mock_image):
        client = mock_image.return_value
        image = self.context._discover_or_create_image()
        self.assertEqual(image, mock_image().create_image.return_value)
        self.assertEqual(self.context._created_images[0],
                         client.create_image.return_value)
        params = {"container_format": CONF.openstack.img_container_format,
                  "image_location": mock.ANY,
                  "disk_format": CONF.openstack.img_disk_format,
                  "image_name": mock.ANY,
                  "visibility": "public"}
        client.create_image.assert_called_once_with(**params)
    def test__discover_or_create_flavor_when_flavor_exists(self):
        client = self.context.clients.nova()
        # an existing flavor matching ram/disk must be reused, not recreated
        client.flavors.list.return_value = [fakes.FakeFlavor(id="id1", ram=64,
                                                             vcpus=1, disk=5)]
        flavor = self.context._discover_or_create_flavor(64, 5)
        self.assertEqual("id1", flavor.id)
        self.assertEqual(0, len(self.context._created_flavors))
    def test__discover_or_create_flavor(self):
        client = self.context.clients.nova()
        client.flavors.list.return_value = []
        client.flavors.create.side_effect = [fakes.FakeFlavor(id="id1")]
        flavor = self.context._discover_or_create_flavor(64, 5)
        self.assertEqual("id1", flavor.id)
        self.assertEqual("id1", self.context._created_flavors[0].id)
    def test__create_network_resources(self):
        client = self.context.clients.neutron()
        fake_network = {
            "id": "nid1",
            "name": "network",
            "status": "status"}
        client.create_network.side_effect = [{"network": fake_network}]
        client.create_router.side_effect = [{"router": {"id": "rid1"}}]
        client.create_subnet.side_effect = [{"subnet": {"id": "subid1"}}]
        client.list_networks.return_value = {"networks": []}
        net_topo = self.context._create_network_resources()
        self.assertEqual("nid1", net_topo["network"]["id"])
        self.assertEqual("rid1", net_topo["routers"][0]["id"])
        self.assertEqual("subid1", net_topo["subnets"][0]["id"])
    @mock.patch("%s.neutron.NeutronService.supports_extension" % PATH)
    def test__create_network_resources_public_network_override(
            self, mock_supports_extension):
        mock_supports_extension.return_value = True
        client = self.context.clients.neutron()
        conf = self.context.conf
        conf.add_section("network")
        conf.set("network", "public_network_id", "my-uuid")
        fake_network = {
            "id": "nid1",
            "name": "network",
            "status": "status"}
        client.create_network.side_effect = [{"network": fake_network}]
        client.create_router.side_effect = [{"router": {"id": "rid1"}}]
        client.create_subnet.side_effect = [{"subnet": {"id": "subid1"}}]
        client.list_networks.return_value = {"networks": []}
        self.context._create_network_resources()
        # the configured public network must become the router's gateway
        _name, pos, _kwargs = client.create_router.mock_calls[0]
        router = pos[0]["router"]
        external_gateway_info = router["external_gateway_info"]
        self.assertEqual("my-uuid", external_gateway_info["network_id"])
        self.assertTrue(external_gateway_info["enable_snat"])
    def test__cleanup_tempest_roles(self):
        self.context._created_roles = [fakes.FakeRole(), fakes.FakeRole()]
        self.context._cleanup_tempest_roles()
        client = self.context.clients.keystone()
        self.assertEqual(2, client.roles.delete.call_count)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__cleanup_images(self, mock_image):
        self.context._created_images = [fakes.FakeImage(id="id1"),
                                        fakes.FakeImage(id="id2")]
        self.context.conf.set("compute", "image_ref", "id1")
        self.context.conf.set("compute", "image_ref_alt", "id2")
        image_service = mock_image.return_value
        # id2 needs one extra poll before it reports DELETED
        image_service.get_image.side_effect = [
            fakes.FakeImage(id="id1", status="DELETED"),
            fakes.FakeImage(id="id2"),
            fakes.FakeImage(id="id2", status="DELETED")]
        self.context._cleanup_images()
        client = self.context.clients.glance()
        client.images.delete.assert_has_calls([mock.call("id1"),
                                               mock.call("id2")])
        self.assertEqual("", self.context.conf.get("compute", "image_ref"))
        self.assertEqual("", self.context.conf.get("compute", "image_ref_alt"))
    def test__cleanup_flavors(self):
        self.context._created_flavors = [fakes.FakeFlavor(id="id1"),
                                         fakes.FakeFlavor(id="id2"),
                                         fakes.FakeFlavor(id="id3")]
        self.context.conf.set("compute", "flavor_ref", "id1")
        self.context.conf.set("compute", "flavor_ref_alt", "id2")
        self.context.conf.set("orchestration", "instance_type", "id3")
        self.context._cleanup_flavors()
        client = self.context.clients.nova()
        self.assertEqual(3, client.flavors.delete.call_count)
        self.assertEqual("", self.context.conf.get("compute", "flavor_ref"))
        self.assertEqual("", self.context.conf.get("compute",
                                                   "flavor_ref_alt"))
        self.assertEqual("", self.context.conf.get("orchestration",
                                                   "instance_type"))
    @mock.patch("%s.neutron.NeutronService.delete_network_topology" % PATH)
    def test__cleanup_network_resources(self, mock_delete_network_topology):
        self.context._created_networks = [{"network": {"name": "net-12345"}}]
        self.context.conf.set("compute", "fixed_network_name", "net-12345")
        self.context._cleanup_network_resources()
        mock_delete_network_topology.assert_called_once_with(
            self.context._created_networks[0]
        )
        self.assertEqual("", self.context.conf.get("compute",
                                                   "fixed_network_name"))
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    @mock.patch("%s.TempestContext._configure_option" % PATH)
    @mock.patch("%s.TempestContext._create_tempest_roles" % PATH)
    @mock.patch("rally.verification.utils.create_dir")
    def test_setup(self, mock_create_dir,
                   mock__create_tempest_roles, mock__configure_option,
                   mock_open):
        verifier = mock.Mock(env=self.env, version="27.1.0")
        verifier.manager.home_dir = "/p/a/t/h"
        # case #1: no neutron and heat
        self.cred.clients.return_value.services.return_value = {}
        ctx = context.TempestContext({"verifier": verifier})
        ctx.conf = mock.Mock()
        ctx.setup()
        ctx.conf.read.assert_called_once_with(verifier.manager.configfile)
        mock_create_dir.assert_called_once_with(ctx.data_dir)
        mock__create_tempest_roles.assert_called_once_with()
        mock_open.assert_called_once_with(verifier.manager.configfile, "w")
        # NOTE(review): this calls write() on the conf mock but asserts
        # nothing — presumably a leftover; confirm intent before removing.
        ctx.conf.write(mock_open.side_effect())
        self.assertEqual(
            [mock.call("DEFAULT", "log_file", "/p/a/t/h/tempest.log"),
             mock.call("oslo_concurrency", "lock_path", "/p/a/t/h/lock_files"),
             mock.call("scenario", "img_file", "/p/a/t/h/" + ctx.image_name,
                       helper_method=ctx._download_image),
             mock.call("compute", "image_ref",
                       helper_method=ctx._discover_or_create_image),
             mock.call("compute", "image_ref_alt",
                       helper_method=ctx._discover_or_create_image),
             mock.call("compute", "flavor_ref",
                       helper_method=ctx._discover_or_create_flavor,
                       flv_ram=config.CONF.openstack.flavor_ref_ram,
                       flv_disk=config.CONF.openstack.flavor_ref_disk),
             mock.call("compute", "flavor_ref_alt",
                       helper_method=ctx._discover_or_create_flavor,
                       flv_ram=config.CONF.openstack.flavor_ref_alt_ram,
                       flv_disk=config.CONF.openstack.flavor_ref_alt_disk)],
            mock__configure_option.call_args_list)
        mock_create_dir.reset_mock()
        mock__create_tempest_roles.reset_mock()
        mock_open.reset_mock()
        mock__configure_option.reset_mock()
        # case #2: neutron and heat are presented
        self.cred.clients.return_value.services.return_value = {
            "network": "neutron", "orchestration": "heat"}
        ctx = context.TempestContext({"verifier": verifier})
        neutron = ctx.clients.neutron()
        neutron.list_networks.return_value = {"networks": ["fake_net"]}
        ctx.conf = mock.Mock()
        ctx.setup()
        ctx.conf.read.assert_called_once_with(verifier.manager.configfile)
        mock_create_dir.assert_called_once_with(ctx.data_dir)
        mock__create_tempest_roles.assert_called_once_with()
        mock_open.assert_called_once_with(verifier.manager.configfile, "w")
        ctx.conf.write(mock_open.side_effect())
        self.assertEqual([
            mock.call("DEFAULT", "log_file", "/p/a/t/h/tempest.log"),
            mock.call("oslo_concurrency", "lock_path", "/p/a/t/h/lock_files"),
            mock.call("scenario", "img_file", "/p/a/t/h/" + ctx.image_name,
                      helper_method=ctx._download_image),
            mock.call("compute", "image_ref",
                      helper_method=ctx._discover_or_create_image),
            mock.call("compute", "image_ref_alt",
                      helper_method=ctx._discover_or_create_image),
            mock.call("compute", "flavor_ref",
                      helper_method=ctx._discover_or_create_flavor,
                      flv_ram=config.CONF.openstack.flavor_ref_ram,
                      flv_disk=config.CONF.openstack.flavor_ref_disk),
            mock.call("compute", "flavor_ref_alt",
                      helper_method=ctx._discover_or_create_flavor,
                      flv_ram=config.CONF.openstack.flavor_ref_alt_ram,
                      flv_disk=config.CONF.openstack.flavor_ref_alt_disk),
            mock.call("compute", "fixed_network_name",
                      helper_method=ctx._create_network_resources),
            mock.call("orchestration", "instance_type",
                      helper_method=ctx._discover_or_create_flavor,
                      flv_ram=config.CONF.openstack.heat_instance_type_ram,
                      flv_disk=config.CONF.openstack.heat_instance_type_disk)
        ], mock__configure_option.call_args_list)
        # case 3: tempest is old.
        verifier.version = "17.0.0"
        ctx = context.TempestContext({"verifier": verifier})
        ctx.conf = mock.Mock()
        ctx.setup()
        # old tempest gets a separate img_dir plus a relative img_file
        mock__configure_option.assert_has_calls(
            [
                mock.call("scenario", "img_dir", "/p/a/t/h"),
                mock.call("scenario", "img_file", ctx.image_name,
                          helper_method=ctx._download_image)
            ]
        )
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,729
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/zaqar/test_utils.py
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.zaqar import utils
from tests.unit import fakes
from tests.unit import test
UTILS = "rally_openstack.task.scenarios.zaqar.utils."
class ZaqarScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the atomic actions exposed by ZaqarScenario."""
    @mock.patch(UTILS + "ZaqarScenario.generate_random_name",
                return_value="kitkat")
    def test_queue_create(self, mock_generate_random_name):
        scenario = utils.ZaqarScenario(self.context)
        created = scenario._queue_create(fakearg="fakearg")
        # the client's queue() result is returned as-is
        self.assertEqual(self.clients("zaqar").queue.return_value, created)
        self.clients("zaqar").queue.assert_called_once_with("kitkat",
                                                            fakearg="fakearg")
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.create_queue")
    def test_queue_delete(self):
        fake_queue = fakes.FakeQueue()
        fake_queue.delete = mock.MagicMock()
        scenario = utils.ZaqarScenario(context=self.context)
        scenario._queue_delete(fake_queue)
        fake_queue.delete.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.delete_queue")
    def test_messages_post(self):
        fake_queue = fakes.FakeQueue()
        fake_queue.post = mock.MagicMock()
        batch = [{"body": {"id": "one"}, "ttl": 100},
                 {"body": {"id": "two"}, "ttl": 120},
                 {"body": {"id": "three"}, "ttl": 140}]
        count = len(batch)
        scenario = utils.ZaqarScenario(context=self.context)
        # min and max message counts are both the batch size here
        scenario._messages_post(fake_queue, batch, count, count)
        fake_queue.post.assert_called_once_with(batch)
    def test_messages_list(self):
        fake_queue = fakes.FakeQueue()
        fake_queue.messages = mock.MagicMock()
        scenario = utils.ZaqarScenario(context=self.context)
        scenario._messages_list(fake_queue)
        fake_queue.messages.assert_called_once_with()
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "zaqar.list_messages")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,730
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/cleanup/resources.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import logging
from rally.task import utils as task_utils
from rally_openstack.common.services.identity import identity
from rally_openstack.common.services.image import glance_v2
from rally_openstack.common.services.image import image
from rally_openstack.common.services.network import neutron
from rally_openstack.task.cleanup import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def get_order(start):
    """Return an iterator over 99 consecutive cleanup-order slots.

    Each OpenStack service gets a band of 99 order values starting at
    *start*; resource classes consume them with next().
    """
    slots = range(start, start + 99)
    return iter(slots)
class SynchronizedDeletion(object):
    """Mixin for resources whose deletion completes synchronously."""
    def is_deleted(self):
        """Deletion is immediate, so always report success."""
        return True
class QuotaMixin(SynchronizedDeletion, base.ResourceManager):
    # NOTE(andreykurilin): Quotas resources are quite complex in terms of
    # cleanup. First of all, they do not have name, id fields at all. There
    # is only one identifier - reference to Keystone Project/Tenant. Also,
    # we should remove them in case of existing users case... To cover both
    # cases we should use project name as name field (it will allow to pass
    # existing users case) and project id as id of resource
    def list(self):
        """Return the owning project as the single "quota" resource."""
        if not self.tenant_uuid:
            return []
        # use the admin client when the manager requires admin rights,
        # falling back to the user client otherwise
        client = self._admin_required and self.admin or self.user
        project = identity.Identity(client).get_project(self.tenant_uuid)
        return [project]
# MAGNUM
# Magnum resources consume cleanup-order slots starting at 80.
_magnum_order = get_order(80)
@base.resource(service=None, resource=None)
class MagnumMixin(base.ResourceManager):
    """Shared behavior for Magnum resources: uuid ids and paged listing."""
    def id(self):
        """Returns id of resource."""
        return self.raw_resource.uuid
    def list(self):
        """Collect all resources by walking marker-based pages."""
        collected = []
        marker = None
        page = self._manager().list(marker=marker)
        while page:
            collected.extend(page)
            # the last uuid of a page is the marker for the next one
            marker = page[-1].uuid
            page = self._manager().list(marker=marker)
        return collected
# Behavior (uuid ids, paged listing) comes entirely from MagnumMixin.
@base.resource("magnum", "clusters", order=next(_magnum_order),
               tenant_resource=True)
class MagnumCluster(MagnumMixin):
    """Resource class for Magnum cluster."""
# Behavior (uuid ids, paged listing) comes entirely from MagnumMixin.
@base.resource("magnum", "cluster_templates", order=next(_magnum_order),
               tenant_resource=True)
class MagnumClusterTemplate(MagnumMixin):
    """Resource class for Magnum cluster_template."""
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
    def name(self):
        """Return the stack's display name (heat uses stack_name)."""
        return self.raw_resource.stack_name
# SENLIN
_senlin_order = get_order(150)


@base.resource(service=None, resource=None, admin_required=True)
class SenlinMixin(base.ResourceManager):
    """Shared list/delete behavior for Senlin resources."""

    def id(self):
        return self.raw_resource["id"]

    def _manager(self):
        # Use the admin client only when the plugin requires admin rights.
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()

    def list(self):
        # E.g. ``client.clusters()`` for the "clusters" resource.
        return getattr(self._manager(), self._resource)()

    def delete(self):
        # make singular form of resource name from plural form
        res_name = self._resource[:-1]
        # E.g. ``delete_cluster(<id>)``.
        return getattr(self._manager(), "delete_%s" % res_name)(self.id())


@base.resource("senlin", "clusters",
               admin_required=True, order=next(_senlin_order))
class SenlinCluster(SenlinMixin):
    """Resource class for Senlin Cluster."""


@base.resource("senlin", "profiles", order=next(_senlin_order),
               admin_required=False, tenant_resource=True)
class SenlinProfile(SenlinMixin):
    """Resource class for Senlin Profile."""
# NOVA
_nova_order = get_order(200)


@base.resource("nova", "servers", order=next(_nova_order),
               tenant_resource=True)
class NovaServer(base.ResourceManager):
    """Removes Nova servers, unlocking them first when necessary."""

    def list(self):
        """List all servers."""
        # limit=-1 disables pagination and returns everything at once.
        return self._manager().list(limit=-1)

    def delete(self):
        # A locked server cannot be deleted; unlock it first.
        if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
            self.raw_resource.unlock()
        super(NovaServer, self).delete()
@base.resource("nova", "server_groups", order=next(_nova_order),
               tenant_resource=True)
class NovaServerGroups(base.ResourceManager):
    """Removes Nova server groups."""
    pass


@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
    """Removes Nova keypairs."""
    pass


@base.resource("nova", "quotas", order=next(_nova_order),
               admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin):
    """Resets per-project Nova quotas."""
    pass


@base.resource("nova", "flavors", order=next(_nova_order),
               admin_required=True, perform_for_admin_only=True)
class NovaFlavors(base.ResourceManager):
    """Removes flavors (admin-only)."""
    pass

    def is_deleted(self):
        # Flavors are fetched by name here; a NotFound from the API
        # means the deletion has completed.
        from novaclient import exceptions as nova_exc
        try:
            self._manager().get(self.name())
        except nova_exc.NotFound:
            return True
        return False


@base.resource("nova", "aggregates", order=next(_nova_order),
               admin_required=True, perform_for_admin_only=True)
class NovaAggregate(SynchronizedDeletion, base.ResourceManager):
    """Removes host aggregates after evacuating their hosts."""

    def delete(self):
        # An aggregate cannot be deleted while it still contains hosts.
        for host in self.raw_resource.hosts:
            self.raw_resource.remove_host(host)
        super(NovaAggregate, self).delete()
# NEUTRON
_neutron_order = get_order(300)


@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
    """Shared helpers for Neutron-based resources."""

    @property
    def _neutron(self):
        # High-level Neutron service helper bound to the proper client.
        return neutron.NeutronService(
            self._admin_required and self.admin or self.user)

    def _manager(self):
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()

    def id(self):
        return self.raw_resource["id"]

    def name(self):
        return self.raw_resource["name"]

    def delete(self):
        key = "delete_%s" % self._resource
        # Prefer the helper's delete method; fall back to the raw client.
        delete_method = getattr(
            self._neutron, key, getattr(self._manager(), key)
        )
        delete_method(self.id())

    @property
    def _plural_key(self):
        # "policy" -> "policies"; otherwise just append "s".
        if self._resource.endswith("y"):
            return self._resource[:-1] + "ies"
        else:
            return self._resource + "s"

    def list(self):
        list_method = getattr(self._manager(), "list_%s" % self._plural_key)
        result = list_method(tenant_id=self.tenant_uuid)[self._plural_key]
        if self.tenant_uuid:
            # Re-filter locally in case the server ignored tenant_id.
            result = [r for r in result if r["tenant_id"] == self.tenant_uuid]
        return result
class NeutronLbaasV1Mixin(NeutronMixin):
    """Lists LBaaS v1 resources only when the extension is deployed."""

    def list(self):
        if self._neutron.supports_extension("lbaas", silent=True):
            return super(NeutronLbaasV1Mixin, self).list()
        return []


@base.resource("neutron", "vip", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Vip(NeutronLbaasV1Mixin):
    """Removes LBaaS v1 VIPs."""
    pass


@base.resource("neutron", "health_monitor", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Healthmonitor(NeutronLbaasV1Mixin):
    """Removes LBaaS v1 health monitors."""
    pass


@base.resource("neutron", "pool", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV1Pool(NeutronLbaasV1Mixin):
    """Removes LBaaS v1 pools."""
    pass
class NeutronLbaasV2Mixin(NeutronMixin):
    """Lists LBaaS v2 resources only when the extension is deployed."""

    def list(self):
        if self._neutron.supports_extension("lbaasv2", silent=True):
            return super(NeutronLbaasV2Mixin, self).list()
        return []


@base.resource("neutron", "loadbalancer", order=next(_neutron_order),
               tenant_resource=True)
class NeutronV2Loadbalancer(NeutronLbaasV2Mixin):
    """Removes LBaaS v2 load balancers."""

    def is_deleted(self):
        try:
            self._manager().show_loadbalancer(self.id())
        except Exception as e:
            # Only a 404 proves it is gone; the 400 default keeps other
            # failures reported as "not deleted yet".
            return getattr(e, "status_code", 400) == 404
        return False
# OCTAVIA
class OctaviaMixIn(NeutronMixin):
    """Shared delete/status/list helpers for Octavia resources."""

    @property
    def _client(self):
        # TODO(andreykurilin): use proper helper class from
        #   rally_openstack.common.services as soon as it will have unified
        #   style of arguments across all methods
        client = self.admin or self.user
        return getattr(client, self._service)()

    def delete(self):
        from octaviaclient.api.v2 import octavia as octavia_exc

        # E.g. ``pool_delete(<id>)`` for the "pool" resource.
        delete_method = getattr(self._client, "%s_delete" % self._resource)
        try:
            return delete_method(self.id())
        except octavia_exc.OctaviaClientException as e:
            if e.code == 409 and "Invalid state PENDING_DELETE" in e.message:
                # NOTE(andreykurilin): it is not ok. Probably this resource
                #   is not properly cleanup-ed (without wait-for loop)
                #   during the workload. No need to fail, continue silently.
                return
            raise

    def is_deleted(self):
        from osc_lib import exceptions as osc_exc

        show_method = getattr(self._client, "%s_show" % self._resource)
        try:
            show_method(self.id())
        except osc_exc.NotFound:
            return True
        return False

    def list(self):
        list_method = getattr(self._client, "%s_list" % self._resource)
        # Octavia keys plural collections without underscores,
        # e.g. "load_balancer" -> "loadbalancers".
        return list_method()[self._plural_key.replace("_", "")]
@base.resource("octavia", "load_balancer", order=next(_neutron_order),
               tenant_resource=True)
class OctaviaLoadBalancers(OctaviaMixIn):
    """Removes Octavia load balancers with cascade deletion."""

    def delete(self):
        from octaviaclient.api.v2 import octavia as octavia_exc

        # cascade=True deletes the balancer's children (listeners, pools,
        # members...) together with the load balancer itself.
        delete_method = getattr(self._client, "load_balancer_delete")
        try:
            return delete_method(self.id(), cascade=True)
        except octavia_exc.OctaviaClientException as e:
            if e.code == 409 and "Invalid state PENDING_DELETE" in e.message:
                # NOTE(andreykurilin): it is not ok. Probably this resource
                #   is not properly cleanup-ed (without wait-for loop)
                #   during the workload. No need to fail, continue silently.
                return
            raise
@base.resource("octavia", "pool", order=next(_neutron_order),
               tenant_resource=True)
class OctaviaPools(OctaviaMixIn):
    """Removes Octavia pools."""
    pass


@base.resource("octavia", "listener", order=next(_neutron_order),
               tenant_resource=True)
class OctaviaListeners(OctaviaMixIn):
    """Removes Octavia listeners."""
    pass


@base.resource("octavia", "l7policy", order=next(_neutron_order),
               tenant_resource=True)
class OctaviaL7Policies(OctaviaMixIn):
    """Removes Octavia L7 policies."""
    pass


@base.resource("octavia", "health_monitor", order=next(_neutron_order),
               tenant_resource=True)
class OctaviaHealthMonitors(OctaviaMixIn):
    """Removes Octavia health monitors."""
    pass
@base.resource("neutron", "bgpvpn", order=next(_neutron_order),
               admin_required=True, perform_for_admin_only=True)
class NeutronBgpvpn(NeutronMixin):
    """Removes BGP VPNs when the extension is available."""

    def list(self):
        if self._neutron.supports_extension("bgpvpn", silent=True):
            return self._manager().list_bgpvpns()["bgpvpns"]
        return []
@base.resource("neutron", "floatingip", order=next(_neutron_order),
               tenant_resource=True)
class NeutronFloatingIP(NeutronMixin):
    """Removes floating IPs, matched via their description field."""

    def name(self):
        # Floating IPs have no name; the description field is used as
        # the name for resource matching.
        return self.raw_resource.get("description", "")

    def list(self):
        if CONF.openstack.pre_newton_neutron:
            # NOTE(andreykurilin): Neutron API of pre-newton openstack
            #   releases does not support description field in Floating IPs.
            #   We do not want to remove not-rally resources, so let's just do
            #   nothing here and move pre-newton logic into separate plugins
            return []
        return super(NeutronFloatingIP, self).list()
@base.resource("neutron", "trunk", order=next(_neutron_order),
               tenant_resource=True)
class NeutronTrunk(NeutronMixin):
    """Removes trunks."""

    # Trunks must be deleted before the parent/subports are deleted
    def list(self):
        try:
            return super(NeutronTrunk, self).list()
        except Exception as e:
            # A 404 here means the trunk extension is not deployed.
            if getattr(e, "status_code", 400) == 404:
                return []
            raise
@base.resource("neutron", "port", order=next(_neutron_order),
               tenant_resource=True)
class NeutronPort(NeutronMixin):
    """Removes ports, resolving names for auto-created router ports."""

    # NOTE(andreykurilin): port is the kind of resource that can be created
    #   automatically. In this case it doesn't have name field which matches
    #   our resource name templates.

    def __init__(self, *args, **kwargs):
        super(NeutronPort, self).__init__(*args, **kwargs)
        # Per-instance cache of listed ports/routers to avoid repeated
        # API calls while resolving parent router names.
        self._cache = {}

    @property
    def ROUTER_INTERFACE_OWNERS(self):
        return self._neutron.ROUTER_INTERFACE_OWNERS

    @property
    def ROUTER_GATEWAY_OWNER(self):
        return self._neutron.ROUTER_GATEWAY_OWNER

    def _get_resources(self, resource):
        # Fetch "ports" or "routers" once, filtered to this tenant.
        if resource not in self._cache:
            resources = getattr(self._neutron, "list_%s" % resource)()
            self._cache[resource] = [r for r in resources
                                     if r["tenant_id"] == self.tenant_uuid]
        return self._cache[resource]

    def list(self):
        ports = self._get_resources("ports")
        for port in ports:
            if not port.get("name"):
                parent_name = None
                if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS
                        or port["device_owner"] == self.ROUTER_GATEWAY_OWNER):
                    # first case is a port created while adding an interface
                    #   to the subnet
                    # second case is a port created while adding gateway for
                    #   the network
                    port_router = [r for r in self._get_resources("routers")
                                   if r["id"] == port["device_id"]]
                    if port_router:
                        parent_name = port_router[0]["name"]
                if parent_name:
                    port["parent_name"] = parent_name
        return ports

    def name(self):
        # Prefer the resolved router name for auto-created ports.
        return self.raw_resource.get("parent_name",
                                     self.raw_resource.get("name", ""))

    def delete(self):
        found = self._neutron.delete_port(self.raw_resource)
        if not found:
            # Port can be already auto-deleted, skip silently
            LOG.debug(f"Port {self.id()} was not deleted. Skip silently "
                      f"because port can be already auto-deleted.")
@base.resource("neutron", "subnet", order=next(_neutron_order),
               tenant_resource=True)
class NeutronSubnet(NeutronMixin):
    """Removes subnets."""
    pass


@base.resource("neutron", "network", order=next(_neutron_order),
               tenant_resource=True)
class NeutronNetwork(NeutronMixin):
    """Removes networks."""
    pass


@base.resource("neutron", "router", order=next(_neutron_order),
               tenant_resource=True)
class NeutronRouter(NeutronMixin):
    """Removes routers."""
    pass
@base.resource("neutron", "security_group", order=next(_neutron_order),
               tenant_resource=True)
class NeutronSecurityGroup(NeutronMixin):
    """Removes tenant security groups, keeping the "default" one."""

    def list(self):
        try:
            tenant_sgs = super(NeutronSecurityGroup, self).list()
            # NOTE(pirsriva): Filter out "default" security group deletion
            # by non-admin role user.
            # Return a real list instead of a lazy ``filter`` object so the
            # result can be sized and iterated more than once, consistent
            # with every other list() implementation in this module.
            return [sg for sg in tenant_sgs if sg["name"] != "default"]
        except Exception as e:
            # A 404 here means the security-group extension is unavailable.
            if getattr(e, "status_code", 400) == 404:
                return []
            raise
@base.resource("neutron", "quota", order=next(_neutron_order),
               admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin):
    """Resets per-project Neutron quotas."""

    def delete(self):
        # Resetting the quota requires the admin client.
        self.admin.neutron().delete_quota(self.tenant_uuid)
# CINDER
_cinder_order = get_order(400)


@base.resource("cinder", "backups", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
    """Removes volume backups."""
    pass


@base.resource("cinder", "volume_types", order=next(_cinder_order),
               admin_required=True, perform_for_admin_only=True)
class CinderVolumeType(base.ResourceManager):
    """Removes volume types (admin-only)."""
    pass


@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
    """Removes volume snapshots."""
    pass


@base.resource("cinder", "transfers", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
    """Removes volume transfers."""
    pass


@base.resource("cinder", "volumes", order=next(_cinder_order),
               tenant_resource=True)
class CinderVolume(base.ResourceManager):
    """Removes volumes."""
    pass
@base.resource("cinder", "image_volumes_cache", order=next(_cinder_order),
               admin_required=True, perform_for_admin_only=True)
class CinderImageVolumeCache(base.ResourceManager):
    """Removes image-volume cache entries (admin-only)."""

    def _glance(self):
        return image.Image(self.admin)

    def _manager(self):
        return self.admin.cinder().volumes

    def list(self):
        # Cache volumes are named "image-<image id>"; pair each such
        # volume with the image it was created from.
        images = dict(("image-%s" % i.id, i)
                      for i in self._glance().list_images())
        return [{"volume": v, "image": images[v.name]}
                for v in self._manager().list(search_opts={"all_tenants": 1})
                if v.name in images]

    def name(self):
        return self.raw_resource["image"].name

    def id(self):
        return self.raw_resource["volume"].id
@base.resource("cinder", "quotas", order=next(_cinder_order),
               admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
    """Resets per-project Cinder quotas."""
    pass


@base.resource("cinder", "qos_specs", order=next(_cinder_order),
               admin_required=True, perform_for_admin_only=True)
class CinderQos(base.ResourceManager):
    """Removes QoS specs (admin-only)."""
    pass
# MANILA
_manila_order = get_order(450)


@base.resource("manila", "shares", order=next(_manila_order),
               tenant_resource=True)
class ManilaShare(base.ResourceManager):
    """Removes Manila shares."""
    pass


@base.resource("manila", "share_networks", order=next(_manila_order),
               tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
    """Removes Manila share networks."""
    pass


@base.resource("manila", "security_services", order=next(_manila_order),
               tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
    """Removes Manila security services."""
    pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
    """Removes Glance images, including deactivated ones."""

    def _client(self):
        return image.Image(self.admin or self.user)

    def list(self):
        # Deactivated images are not in the default listing, so they
        # are queried explicitly and appended.
        images = (self._client().list_images(owner=self.tenant_uuid)
                  + self._client().list_images(status="deactivated",
                                               owner=self.tenant_uuid))
        return images

    def delete(self):
        client = self._client()
        if self.raw_resource.status == "deactivated":
            # A deactivated image must be reactivated before deletion.
            glancev2 = glance_v2.GlanceV2Service(self.admin or self.user)
            glancev2.reactivate_image(self.raw_resource.id)
        client.delete_image(self.raw_resource.id)
        # Block until the image actually disappears (or timeout).
        task_utils.wait_for_status(
            self.raw_resource, ["deleted"],
            check_deletion=True,
            update_resource=self._client().get_image,
            timeout=CONF.openstack.glance_image_delete_timeout,
            check_interval=CONF.openstack.glance_image_delete_poll_interval)
# SAHARA
_sahara_order = get_order(600)


@base.resource("sahara", "job_executions", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara job executions."""
    pass


@base.resource("sahara", "jobs", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara jobs."""
    pass


@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara internal job binaries."""
    pass


@base.resource("sahara", "job_binaries", order=next(_sahara_order),
               tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara job binaries."""
    pass


@base.resource("sahara", "data_sources", order=next(_sahara_order),
               tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara data sources."""
    pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
               tenant_resource=True)
class SaharaCluster(base.ResourceManager):
    """Removes Sahara clusters."""

    # Need special treatment for Sahara Cluster because of the way the
    # exceptions are described in:
    # https://github.com/openstack/python-saharaclient/blob/master/
    # saharaclient/api/base.py#L145
    def is_deleted(self):
        from saharaclient.api import base as saharaclient_base

        try:
            self._manager().get(self.id())
            return False
        except saharaclient_base.APIException as e:
            # Only a 404 means the cluster is actually gone.
            return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara cluster templates."""
    pass


@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
               tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
    """Removes Sahara node group templates."""
    pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
    """Removes Ceilometer alarms of the tenant."""

    def id(self):
        return self.raw_resource.alarm_id

    def list(self):
        # Filter server-side by the owning project id.
        query = [{
            "field": "project_id",
            "op": "eq",
            "value": self.tenant_uuid
        }]
        return self._manager().list(q=query)
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
    """Removes Zaqar queues of the user."""

    def list(self):
        return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)


class DesignateResource(SynchronizedDeletion, base.ResourceManager):
    """Base helper for Designate (DNS) resources."""

    # TODO(boris-42): This should be handled somewhere else.
    NAME_PREFIX = "s_rally_"

    def _manager(self, resource=None):
        # Map resource names to api / client version
        resource = resource or self._resource
        version = {
            "domains": "1",
            "servers": "1",
            "records": "1",
            "recordsets": "2",
            "zones": "2"
        }[resource]
        client = self._admin_required and self.admin or self.user
        return getattr(getattr(client, self._service)(version), resource)

    def id(self):
        """Returns id of resource."""
        return self.raw_resource["id"]

    def name(self):
        """Returns name of resource."""
        return self.raw_resource["name"]

    def list(self):
        # Only touch resources created by Rally (matched by name prefix).
        return [item for item in self._manager().list()
                if item["name"].startswith(self.NAME_PREFIX)]
@base.resource("designate", "servers", order=next(_designate_order),
               admin_required=True, perform_for_admin_only=True, threads=1)
class DesignateServer(DesignateResource):
    """Removes Designate servers (admin-only, serialized)."""
    pass


@base.resource("designate", "zones", order=next(_designate_order),
               tenant_resource=True, threads=1)
class DesignateZones(DesignateResource):
    """Removes Designate zones, paging through the v2 API."""

    def list(self):
        marker = None
        # Server-side wildcard match on the Rally name prefix.
        criterion = {"name": "%s*" % self.NAME_PREFIX}
        while True:
            items = self._manager().list(marker=marker, limit=100,
                                         criterion=criterion)
            if not items:
                break
            for item in items:
                yield item
            # Continue the listing after the last item of this page.
            marker = items[-1]["id"]
# SWIFT
_swift_order = get_order(1000)


class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
    """Shared helpers for Swift containers and objects."""

    def _manager(self):
        client = self._admin_required and self.admin or self.user
        return getattr(client, self._service)()

    def id(self):
        return self.raw_resource

    def name(self):
        # NOTE(stpierre): raw_resource is a list of either [container
        #   name, object name] (as in SwiftObject) or just [container
        #   name] (as in SwiftContainer).
        return self.raw_resource[-1]

    def delete(self):
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        # NOTE(weiwu): *self.raw_resource is required because for deleting
        #   container we are passing only container name, to delete object we
        #   should pass as first argument container and second is object name.
        delete_method(*self.raw_resource)
@base.resource("swift", "object", order=next(_swift_order),
               tenant_resource=True)
class SwiftObject(SwiftMixin):
    """Removes Swift objects."""

    def list(self):
        object_list = []
        # get_account()[1] is the listing of containers.
        containers = self._manager().get_account(full_listing=True)[1]
        for con in containers:
            # get_container()[1] is the listing of objects.
            objects = self._manager().get_container(con["name"],
                                                    full_listing=True)[1]
            for obj in objects:
                raw_resource = [con["name"], obj["name"]]
                object_list.append(raw_resource)
        return object_list


@base.resource("swift", "container", order=next(_swift_order),
               tenant_resource=True)
class SwiftContainer(SwiftMixin):
    """Removes Swift containers."""

    def list(self):
        containers = self._manager().get_account(full_listing=True)[1]
        return [[con["name"]] for con in containers]
# MISTRAL
_mistral_order = get_order(1100)


@base.resource("mistral", "workbooks", order=next(_mistral_order),
               tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
    """Removes Mistral workbooks (deleted by name, not id)."""

    def delete(self):
        self._manager().delete(self.raw_resource.name)


@base.resource("mistral", "workflows", order=next(_mistral_order),
               tenant_resource=True)
class MistralWorkflows(SynchronizedDeletion, base.ResourceManager):
    """Removes Mistral workflows."""
    pass


@base.resource("mistral", "executions", order=next(_mistral_order),
               tenant_resource=True)
class MistralExecutions(SynchronizedDeletion, base.ResourceManager):
    """Removes Mistral executions."""

    def name(self):
        # NOTE(andreykurilin): Mistral Execution doesn't have own name which
        #   we can use for filtering, but it stores workflow id and name,
        #   even after workflow deletion.
        return self.raw_resource.workflow_name
# MURANO
_murano_order = get_order(1200)


@base.resource("murano", "environments", tenant_resource=True,
               order=next(_murano_order))
class MuranoEnvironments(SynchronizedDeletion, base.ResourceManager):
    """Removes Murano environments."""
    pass


@base.resource("murano", "packages", tenant_resource=True,
               order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
    """Removes Murano packages, keeping the built-in "Core library"."""

    def list(self):
        return filter(lambda x: x.name != "Core library",
                      super(MuranoPackages, self).list())
# IRONIC
_ironic_order = get_order(1300)


@base.resource("ironic", "node", admin_required=True,
               order=next(_ironic_order), perform_for_admin_only=True)
class IronicNodes(base.ResourceManager):
    """Removes Ironic nodes (admin-only)."""

    def id(self):
        return self.raw_resource.uuid
# GNOCCHI
_gnocchi_order = get_order(1400)


class GnocchiMixin(base.ResourceManager):
    """Shared helpers for Gnocchi resources addressed by name."""

    def name(self):
        return self.raw_resource["name"]

    def id(self):
        # These Gnocchi resources are addressed by name, not uuid.
        return self.raw_resource["name"]


@base.resource("gnocchi", "archive_policy_rule", order=next(_gnocchi_order),
               admin_required=True, perform_for_admin_only=True)
class GnocchiArchivePolicyRule(GnocchiMixin):
    """Removes Gnocchi archive policy rules."""
    pass


@base.resource("gnocchi", "archive_policy", order=next(_gnocchi_order),
               admin_required=True, perform_for_admin_only=True)
class GnocchiArchivePolicy(GnocchiMixin):
    """Removes Gnocchi archive policies."""
    pass


@base.resource("gnocchi", "resource_type", order=next(_gnocchi_order),
               admin_required=True, perform_for_admin_only=True)
class GnocchiResourceType(GnocchiMixin):
    """Removes Gnocchi resource types."""
    pass
@base.resource("gnocchi", "metric", order=next(_gnocchi_order),
               tenant_resource=True)
class GnocchiMetric(GnocchiMixin):
    """Removes Gnocchi metrics, paging through the listing."""

    def id(self):
        return self.raw_resource["id"]

    def list(self):
        result = []
        marker = None
        while True:
            metrics = self._manager().list(marker=marker)
            if not metrics:
                break
            result.extend(metrics)
            marker = metrics[-1]["id"]
        if self.tenant_uuid:
            # "creator" has the form "<user>:<project>"; keep only this
            # tenant's metrics.
            result = [r for r in result
                      if r["creator"].partition(":")[2] == self.tenant_uuid]
        return result
@base.resource("gnocchi", "resource", order=next(_gnocchi_order),
               tenant_resource=True)
class GnocchiResource(GnocchiMixin):
    """Removes Gnocchi resources, paging through the listing."""

    def id(self):
        return self.raw_resource["id"]

    def name(self):
        return self.raw_resource["original_resource_id"]

    def is_deleted(self):
        from gnocchiclient import exceptions as gnocchi_exc

        try:
            # Resources are fetched by (type, id).
            self._manager().get(self.raw_resource["type"], self.id())
        except gnocchi_exc.NotFound:
            return True
        return False

    def list(self):
        result = []
        marker = None
        while True:
            resources = self._manager().list(marker=marker)
            if not resources:
                break
            result.extend(resources)
            marker = resources[-1]["id"]
        return result
# WATCHER
_watcher_order = get_order(1500)


class WatcherMixin(SynchronizedDeletion, base.ResourceManager):
    """Shared helpers for Watcher resources."""

    def id(self):
        return self.raw_resource.uuid

    def list(self):
        # limit=0 means "no limit" for the Watcher client.
        return self._manager().list(limit=0)

    def is_deleted(self):
        from watcherclient.common.apiclient import exceptions

        try:
            self._manager().get(self.id())
            return False
        except exceptions.NotFound:
            return True


@base.resource("watcher", "audit_template", order=next(_watcher_order),
               admin_required=True, perform_for_admin_only=True)
class WatcherTemplate(WatcherMixin):
    """Removes Watcher audit templates."""
    pass


@base.resource("watcher", "action_plan", order=next(_watcher_order),
               admin_required=True, perform_for_admin_only=True)
class WatcherActionPlan(WatcherMixin):
    """Removes Watcher action plans (they carry no usable name)."""

    def name(self):
        return base.NoName(self._resource)


@base.resource("watcher", "audit", order=next(_watcher_order),
               admin_required=True, perform_for_admin_only=True)
class WatcherAudit(WatcherMixin):
    """Removes Watcher audits, identified by uuid."""

    def name(self):
        return self.raw_resource.uuid
# KEYSTONE
_keystone_order = get_order(9000)


class KeystoneMixin(SynchronizedDeletion):
    """Shared list/delete helpers for Keystone resources."""

    def _manager(self):
        return identity.Identity(self.admin)

    def delete(self):
        delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        delete_method(self.id())

    def list(self):
        # E.g. "user" -> list_users().
        resources = self._resource + "s"
        return getattr(self._manager(), "list_%s" % resources)()


@base.resource("keystone", "user", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
    """Removes Keystone users."""
    pass


@base.resource("keystone", "project", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
    """Removes Keystone projects."""
    pass


@base.resource("keystone", "service", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
    """Removes Keystone services."""
    pass


@base.resource("keystone", "role", order=next(_keystone_order),
               admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
    """Removes Keystone roles."""
    pass
# NOTE(andreykurilin): unfortunately, ec2 credentials doesn't have name
#   and id fields. It makes impossible to identify resources belonging to
#   particular task.
@base.resource("keystone", "ec2", tenant_resource=True,
               order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
    """Removes all EC2 credentials of the user."""

    def _manager(self):
        return identity.Identity(self.user)

    def id(self):
        # EC2 credentials expose no id; return a placeholder.
        return "n/a"

    def name(self):
        return base.NoName(self._resource)

    @property
    def user_id(self):
        return self.user.keystone.auth_ref.user_id

    def list(self):
        return self._manager().list_ec2credentials(self.user_id)

    def delete(self):
        # Credentials are addressed by (user id, access key).
        self._manager().delete_ec2credential(
            self.user_id, access=self.raw_resource.access)
# BARBICAN
@base.resource("barbican", "secrets", order=1500, admin_required=True,
               perform_for_admin_only=True)
class BarbicanSecrets(base.ResourceManager):
    """Removes Barbican secrets (admin-only)."""

    def id(self):
        return self.raw_resource.secret_ref

    def is_deleted(self):
        try:
            # Accessing .status forces the lazy entity to be fetched.
            self._manager().get(self.id()).status
        except Exception:
            # NOTE(review): any failure — not just a 404 — is treated as
            # "deleted" here, so e.g. auth errors would be masked too.
            return True
        return False


@base.resource("barbican", "containers", order=1500, admin_required=True,
               perform_for_admin_only=True)
class BarbicanContainers(base.ResourceManager):
    """Removes Barbican containers (admin-only)."""
    pass


@base.resource("barbican", "orders", order=1500, admin_required=True,
               perform_for_admin_only=True)
class BarbicanOrders(base.ResourceManager):
    """Removes Barbican orders (admin-only)."""
    pass
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,731
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/manila/manila_shares.py
|
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import validation
from rally_openstack.common import consts as rally_consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.contexts.manila import consts
from rally_openstack.task.scenarios.manila import utils as manila_utils
CONF = cfg.CONF
CONTEXT_NAME = consts.SHARES_CONTEXT_NAME
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name=CONTEXT_NAME, platform="openstack", order=455)
class Shares(context.OpenStackContext):
    """This context creates shares for Manila project."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": rally_consts.JSON_SCHEMA,
        "properties": {
            # How many shares to pre-create in each tenant.
            "shares_per_tenant": {
                "type": "integer",
                "minimum": 1,
            },
            # Share size (integer, minimum 1).
            "size": {
                "type": "integer",
                "minimum": 1
            },
            "share_proto": {
                "type": "string",
            },
            "share_type": {
                "type": "string",
            },
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "shares_per_tenant": 1,
        "size": 1,
        "share_proto": "NFS",
        "share_type": None,
    }

    def _create_shares(self, manila_scenario, tenant_id, share_proto, size=1,
                       share_type=None):
        """Create the configured number of shares for one tenant."""
        tenant_ctxt = self.context["tenants"][tenant_id]
        tenant_ctxt.setdefault("shares", [])
        for i in range(self.config["shares_per_tenant"]):
            kwargs = {"share_proto": share_proto, "size": size}
            if share_type:
                kwargs["share_type"] = share_type
            share_networks = tenant_ctxt.get("manila_share_networks", {}).get(
                "share_networks", [])
            if share_networks:
                # Spread shares across available share networks round-robin.
                kwargs["share_network"] = share_networks[
                    i % len(share_networks)]["id"]
            share = manila_scenario._create_share(**kwargs)
            tenant_ctxt["shares"].append(share.to_dict())

    def setup(self):
        """Create shares for every tenant of the context."""
        for user, tenant_id in self._iterate_per_tenants():
            manila_scenario = manila_utils.ManilaScenario({
                "task": self.task,
                "owner_id": self.context["owner_id"],
                "user": user
            })
            self._create_shares(
                manila_scenario,
                tenant_id,
                self.config["share_proto"],
                self.config["size"],
                self.config["share_type"],
            )

    def cleanup(self):
        """Delete all shares created by this context."""
        resource_manager.cleanup(
            names=["manila.shares"],
            users=self.context.get("users", []),
            superclass=manila_utils.ManilaScenario,
            task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,732
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/gnocchi/utils.py
|
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common.services.gnocchi import metric
from rally_openstack.task import scenario
class GnocchiBase(scenario.OpenStackScenario):
    """Base class for Gnocchi scenarios with basic atomic actions."""

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(GnocchiBase, self).__init__(context, admin_clients, clients)
        # Admin-scoped service helper (only when admin credentials exist).
        if hasattr(self, "_admin_clients"):
            self.admin_gnocchi = metric.GnocchiService(
                self._admin_clients, name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions())
        # User-scoped service helper.
        if hasattr(self, "_clients"):
            self.gnocchi = metric.GnocchiService(
                self._clients, name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,733
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/neutron/test_lbaas.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.neutron import lbaas as lbaas_context
from tests.unit import test
NET = "rally_openstack.common.wrappers.network."
class LbaasTestCase(test.TestCase):
    """Unit tests for the neutron LBaaS v1 context plugin."""
    def get_context(self, **kwargs):
        # Build a minimal context with two tenants (one network each);
        # ``kwargs`` becomes the "lbaas" plugin configuration.
        foo_tenant = {"networks": [{"id": "foo_net",
                                    "tenant_id": "foo_tenant",
                                    "subnets": ["foo_subnet"]}]}
        bar_tenant = {"networks": [{"id": "bar_net",
                                    "tenant_id": "bar_tenant",
                                    "subnets": ["bar_subnet"]}]}
        return {"task": {"uuid": "foo_task"},
                "admin": {"credential": "foo_admin"},
                "users": [{"id": "foo_user", "tenant_id": "foo_tenant"},
                          {"id": "bar_user", "tenant_id": "bar_tenant"}],
                "config": {"lbaas": kwargs},
                "tenants": {"foo_tenant": foo_tenant,
                            "bar_tenant": bar_tenant}}
    @mock.patch("rally_openstack.common.osclients.Clients")
    @mock.patch(NET + "wrap", return_value="foo_service")
    def test__init__default(self, mock_wrap, mock_clients):
        # With no explicit config every option falls back to
        # Lbaas.DEFAULT_CONFIG.
        context = lbaas_context.Lbaas(self.get_context())
        self.assertEqual(
            context.config["pool"]["lb_method"],
            lbaas_context.Lbaas.DEFAULT_CONFIG["pool"]["lb_method"])
        self.assertEqual(
            context.config["pool"]["protocol"],
            lbaas_context.Lbaas.DEFAULT_CONFIG["pool"]["protocol"])
        self.assertEqual(
            context.config["lbaas_version"],
            lbaas_context.Lbaas.DEFAULT_CONFIG["lbaas_version"])
    @mock.patch("rally_openstack.common.osclients.Clients")
    @mock.patch(NET + "wrap", return_value="foo_service")
    def test__init__explicit(self, mock_wrap, mock_clients):
        # Explicit pool settings must override the defaults.
        context = lbaas_context.Lbaas(
            self.get_context(pool={"lb_method": "LEAST_CONNECTIONS"}))
        self.assertEqual(context.config["pool"]["lb_method"],
                         "LEAST_CONNECTIONS")
    @mock.patch(NET + "wrap")
    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup_with_lbaas(self, mock_clients, mock_wrap):
        # Expected network state after setup(): each tenant network
        # gains one "<tenant prefix>_pool" lb pool.
        foo_net = {"id": "foo_net",
                   "tenant_id": "foo_tenant",
                   "subnets": ["foo_subnet"],
                   "lb_pools": [{"pool": {"id": "foo_pool",
                                          "tenant_id": "foo_tenant"}}]}
        bar_net = {"id": "bar_net",
                   "tenant_id": "bar_tenant",
                   "subnets": ["bar_subnet"],
                   "lb_pools": [{"pool": {"id": "bar_pool",
                                          "tenant_id": "bar_tenant"}}]}
        expected_net = [bar_net, foo_net]
        # Fake create_v1_pool: derive the pool id from the tenant name.
        mock_create = mock.Mock(
            side_effect=lambda t, s,
            **kw: {"pool": {"id": str(t.split("_")[0]) + "_pool",
                            "tenant_id": t}})
        actual_net = []
        mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create)
        net_wrapper = mock_wrap(mock_clients.return_value)
        net_wrapper.supports_extension.return_value = (True, None)
        fake_args = {"lbaas_version": 1}
        lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))
        lb_context._iterate_per_tenants = mock.MagicMock(
            return_value=[
                ("foo_user", "foo_tenant"),
                ("bar_user", "bar_tenant")]
        )
        lb_context.setup()
        lb_context._iterate_per_tenants.assert_called_once_with()
        net_wrapper.supports_extension.assert_called_once_with("lbaas")
        # Sort tenants so the comparison order is deterministic.
        for tenant_id, tenant_ctx in (
                sorted(lb_context.context["tenants"].items())):
            for network in tenant_ctx["networks"]:
                actual_net.append(network)
        self.assertEqual(expected_net, actual_net)
    @mock.patch(NET + "wrap")
    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup_with_no_lbaas(self, mock_clients, mock_wrap):
        # Without the "lbaas" extension setup() must be a no-op.
        mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net")
        mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create)
        fake_args = {"lbaas_version": 1}
        lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))
        net_wrapper = mock_wrap(mock_clients.return_value)
        net_wrapper.supports_extension.return_value = (False, None)
        lb_context._iterate_per_tenants = mock.MagicMock(
            return_value=[("bar_user", "bar_tenant")]
        )
        lb_context.setup()
        lb_context._iterate_per_tenants.assert_not_called()
        net_wrapper.supports_extension.assert_called_once_with("lbaas")
        assert not net_wrapper.create_v1_pool.called
    @mock.patch(NET + "wrap")
    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup_with_lbaas_version_not_one(self, mock_clients, mock_wrap):
        # Only LBaaS v1 is implemented; v2 must raise.
        mock_create = mock.Mock(side_effect=lambda t, **kw: t + "-net")
        mock_wrap.return_value = mock.Mock(create_v1_pool=mock_create)
        fake_args = {"lbaas_version": 2}
        lb_context = lbaas_context.Lbaas(self.get_context(**fake_args))
        lb_context._iterate_per_tenants = mock.MagicMock(
            return_value=[("bar_user", "bar_tenant")]
        )
        net_wrapper = mock_wrap(mock_clients.return_value)
        net_wrapper.supports_extension.return_value = (True, None)
        self.assertRaises(NotImplementedError, lb_context.setup)
    @mock.patch("rally_openstack.common.osclients.Clients")
    @mock.patch(NET + "wrap")
    def test_cleanup(self, mock_wrap, mock_clients):
        # Seed each tenant network with a pool, then verify cleanup()
        # deletes every seeded pool via the wrapper.
        net_wrapper = mock_wrap(mock_clients.return_value)
        lb_context = lbaas_context.Lbaas(self.get_context())
        expected_pools = []
        for tenant_id, tenant_ctx in lb_context.context["tenants"].items():
            resultant_pool = {"pool": {
                "id": str(tenant_id.split("_")[0]) + "_pool"}}
            expected_pools.append(resultant_pool)
            for network in (
                    lb_context.context["tenants"][tenant_id]["networks"]):
                network.setdefault("lb_pools", []).append(resultant_pool)
        lb_context.cleanup()
        net_wrapper.delete_v1_pool.assert_has_calls(
            [mock.call(pool["pool"]["id"]) for pool in expected_pools])
    @mock.patch("rally_openstack.common.osclients.Clients")
    @mock.patch(NET + "wrap")
    def test_cleanup_lbaas_version_not_one(self, mock_wrap, mock_clients):
        # cleanup() must not touch v1 pools when lbaas_version != 1.
        fakeargs = {"lbaas_version": 2}
        net_wrapper = mock_wrap(mock_clients.return_value)
        lb_context = lbaas_context.Lbaas(self.get_context(**fakeargs))
        for tenant_id, tenant_ctx in lb_context.context["tenants"].items():
            resultant_pool = {"pool": {
                "id": str(tenant_id.split("_")[0]) + "_pool"}}
            for network in (
                    lb_context.context["tenants"][tenant_id]["networks"]):
                network.setdefault("lb_pools", []).append(resultant_pool)
        lb_context.cleanup()
        assert not net_wrapper.delete_v1_pool.called
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,734
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/gnocchi/archive_policy.py
|
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicy.list_archive_policy")
class ListArchivePolicy(gnocchiutils.GnocchiBase):
    def run(self):
        """List archive policies."""
        # Listing only needs regular user credentials, hence
        # ``self.gnocchi`` rather than ``self.admin_gnocchi``.
        self.gnocchi.list_archive_policy()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_archive_policy")
class CreateArchivePolicy(gnocchiutils.GnocchiBase):
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Fall back to a single one-second-granularity definition.
        if definition is None:
            definition = [
                {"granularity": "0:00:01", "timespan": "1:00:00"}]
        policy_name = self.generate_random_name()
        # Archive policies are admin-only resources.
        self.admin_gnocchi.create_archive_policy(
            policy_name,
            definition=definition,
            aggregation_methods=aggregation_methods)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy"]},
    name="GnocchiArchivePolicy.create_delete_archive_policy")
class CreateDeleteArchivePolicy(gnocchiutils.GnocchiBase):
    def run(self, definition=None, aggregation_methods=None):
        """Create archive policy and then delete it.

        :param definition: List of definitions
        :param aggregation_methods: List of aggregation methods
        """
        # Fall back to a single one-second-granularity definition.
        if definition is None:
            definition = [
                {"granularity": "0:00:01", "timespan": "1:00:00"}]
        policy_name = self.generate_random_name()
        # Create then immediately delete, both as admin; this times
        # the full create/delete round-trip.
        self.admin_gnocchi.create_archive_policy(
            policy_name,
            definition=definition,
            aggregation_methods=aggregation_methods)
        self.admin_gnocchi.delete_archive_policy(policy_name)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,735
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/cleanup/base.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager
@validation.configure("check_cleanup_resources")
class CheckCleanupResourcesValidator(validation.Validator):
    def __init__(self, admin_required):
        """Validates that openstack resource managers exist

        :param admin_required: describes access level to resource
        """
        super(CheckCleanupResourcesValidator, self).__init__()
        self.admin_required = admin_required

    def validate(self, context, config, plugin_cls, plugin_cfg):
        # Every configured name must be a known resource manager for
        # the requested access level; report the unknown ones.
        known = manager.list_resource_names(
            admin_required=self.admin_required)
        unknown = set(plugin_cfg) - known
        if unknown:
            return self.fail(
                "Couldn't find cleanup resource managers: %s"
                % ", ".join(unknown))
class CleanupMixin(object):
    # Context config is a plain list of resource manager names
    # (strings) to clean up, e.g. ["gnocchi.archive_policy"].
    CONFIG_SCHEMA = {
        "type": "array",
        "$schema": consts.JSON_SCHEMA,
        "items": {
            "type": "string",
        }
    }
    def setup(self):
        # Cleanup contexts create nothing up front; all work happens
        # in the subclasses' cleanup phase.
        pass
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,736
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/loadbalancer/test_utils.py
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.octavia import utils
from tests.unit import test
class LoadBalancerBaseTestCase(test.ScenarioTestCase):
    """Unit tests for the Octavia scenario base class."""
    def setUp(self):
        super(LoadBalancerBaseTestCase, self).setUp()
        # Minimal context with both admin and user credentials so the
        # base class can build its service helper.
        self.context = super(LoadBalancerBaseTestCase, self).get_test_context()
        self.context.update({
            "admin": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "user": {
                "id": "fake_user_id",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant_id",
                       "name": "fake_tenant_name"}
        })
        # Replace the real Octavia service with a mock for all tests.
        patch = mock.patch(
            "rally_openstack.common.services.loadbalancer.octavia.Octavia")
        self.addCleanup(patch.stop)
        self.mock_service = patch.start()
    def test_octavia_base(self):
        # The base scenario must expose the (mocked) Octavia service
        # instance as ``self.octavia``.
        base = utils.OctaviaBase(self.context)
        self.assertEqual(base.octavia,
                         self.mock_service.return_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,737
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/designate/test_zones.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from rally_openstack.task.contexts.designate import zones
from rally_openstack.task.scenarios.designate import utils
from tests.unit import test
CTX = "rally_openstack.task.contexts"
SCN = "rally_openstack.task.scenarios"
class ZoneGeneratorTestCase(test.ScenarioTestCase):
    """Unit tests for the designate "zones" context plugin."""
    def _gen_tenants(self, count):
        # Produce ``count`` fake tenants keyed "0".."count-1".
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = {"name": str(id_)}
        return tenants
    def test_init(self):
        # The context must keep its own slice of the task config.
        self.context.update({
            "config": {
                "zones": {
                    "zones_per_tenant": 5,
                }
            }
        })
        inst = zones.ZoneGenerator(self.context)
        self.assertEqual(inst.config, self.context["config"]["zones"])
    @mock.patch("%s.designate.utils.DesignateScenario._create_zone" % SCN,
                return_value={"id": "uuid"})
    def test_setup(self, mock_designate_scenario__create_zone):
        tenants_count = 2
        users_per_tenant = 5
        zones_per_tenant = 5
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "credential": mock.MagicMock()})
        self.context.update({
            "config": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 5,
                    "concurrent": 10,
                },
                "zones": {
                    "zones_per_tenant": zones_per_tenant,
                    "set_zone_in_network": False
                }
            },
            "admin": {
                "credential": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        # Expected outcome: each tenant gains ``zones_per_tenant``
        # zones carrying the mocked zone id.
        new_context = copy.deepcopy(self.context)
        for id_ in tenants.keys():
            new_context["tenants"][id_].setdefault("zones", [])
            for i in range(zones_per_tenant):
                new_context["tenants"][id_]["zones"].append({"id": "uuid"})
        zones_ctx = zones.ZoneGenerator(self.context)
        zones_ctx.setup()
        self.assertEqual(new_context, self.context)
    @mock.patch("%s.neutron.utils.NeutronScenario" % SCN)
    @mock.patch("%s.designate.utils.DesignateScenario._create_zone" % SCN,
                return_value={"id": "uuid", "name": "fake_name"})
    def test_setup_for_existinge(self, mock_designate_scenario__create_zone,
                                 mock_neutron_scenario):
        # NOTE(review): method name has a typo ("existinge"); renaming
        # would change the externally visible test id, so it is kept.
        # Covers set_zone_in_network=True: the created zone's name must
        # be pushed onto each tenant network as its dns_domain.
        tenants_count = 1
        users_per_tenant = 1
        networks = []
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            networks.append(
                {"id": f"foo_net_{id_}",
                 "tenant_id": id_, "subnets": ["foo_subnet"]})
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "credential": mock.MagicMock()})
        tenants["0"]["networks"] = networks
        self.context.update({
            "config": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 1,
                    "concurrent": 1,
                },
                "zones": {
                    "set_zone_in_network": True
                },
                "network": {}
            },
            "admin": {
                "credential": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        zones_ctx = zones.ZoneGenerator(self.context)
        zones_ctx.setup()
        mock_neutron_scenario.assert_called_once()
        scenario = mock_neutron_scenario.return_value
        scenario.clients.assert_called_with("neutron")
        neutron = scenario.clients.return_value
        neutron.update_network.assert_called_with(
            "foo_net_0", {"network": {"dns_domain": "fake_name"}})
    @mock.patch("%s.designate.zones.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        # cleanup() must delegate to the generic resource manager with
        # the designate.zones resource names.
        tenants_count = 2
        users_per_tenant = 5
        zones_per_tenant = 5
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": "endpoint"})
            tenants[id_].setdefault("zones", [])
            for j in range(zones_per_tenant):
                tenants[id_]["zones"].append({"id": "uuid"})
        self.context.update({
            "config": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 5,
                    "concurrent": 10,
                },
                "zones": {
                    "zones_per_tenant": 5,
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        zones_ctx = zones.ZoneGenerator(self.context)
        zones_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["designate.zones"],
            users=self.context["users"],
            superclass=utils.DesignateScenario,
            task_id=self.context["owner_id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,738
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/storage/cinder_common.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally import exceptions
from rally.task import atomic
from rally.task import utils as bench_utils
from rally_openstack.common.services.image import image
from rally_openstack.common.services.storage import block
CONF = block.CONF
class CinderMixin(object):
    def _get_client(self):
        # Return the cinder client for ``self.version``; the version
        # attribute is expected to be set by the class mixing this in.
        return self._clients.cinder(self.version)
    def _update_resource(self, resource):
        """Re-fetch *resource* from the API (status-polling callback).

        :param resource: cinderclient object or block.* wrapper
        :raises GetResourceNotFound: when the API answers 404
        :raises GetResourceFailure: on any other error
        """
        try:
            # Native cinderclient objects carry their own manager;
            # otherwise map our block.* wrapper types onto the
            # matching client collection.
            manager = getattr(resource, "manager", None)
            if manager:
                res = manager.get(resource.id)
            else:
                if isinstance(resource, block.Volume):
                    attr = "volumes"
                elif isinstance(resource, block.VolumeSnapshot):
                    attr = "volume_snapshots"
                elif isinstance(resource, block.VolumeBackup):
                    attr = "backups"
                # NOTE(review): an unsupported resource type leaves
                # ``attr`` unbound; the resulting error is caught below
                # and reported as GetResourceFailure.
                res = getattr(self._get_client(), attr).get(resource.id)
        except Exception as e:
            # Client versions expose the HTTP status under different
            # attribute names ("code" vs "http_status").
            if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
                raise exceptions.GetResourceNotFound(resource=resource)
            raise exceptions.GetResourceFailure(resource=resource, err=e)
        return res
    def _wait_available_volume(self, volume):
        # Poll until the volume reaches "available", using the
        # configured create timeout/interval; returns the refreshed
        # volume object.
        return bench_utils.wait_for_status(
            volume,
            ready_statuses=["available"],
            update_resource=self._update_resource,
            timeout=CONF.openstack.cinder_volume_create_timeout,
            check_interval=CONF.openstack.cinder_volume_create_poll_interval
        )
    def get_volume(self, volume_id):
        """Get target volume information.

        :param volume_id: ID of the volume to fetch
        :returns: the volume object returned by the cinder client
        """
        aname = "cinder_v%s.get_volume" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().volumes.get(volume_id)
    def delete_volume(self, volume):
        """Delete target volume.

        Blocks until deletion is confirmed (the resource disappears).

        :param volume: volume object (or id accepted by the client)
        """
        aname = "cinder_v%s.delete_volume" % self.version
        with atomic.ActionTimer(self, aname):
            self._get_client().volumes.delete(volume)
            bench_utils.wait_for_status(
                volume,
                ready_statuses=["deleted"],
                check_deletion=True,
                update_resource=self._update_resource,
                timeout=CONF.openstack.cinder_volume_delete_timeout,
                check_interval=(CONF.openstack
                                .cinder_volume_delete_poll_interval)
            )
    def extend_volume(self, volume, new_size):
        """Extend the size of the specified volume.

        :param volume: volume to extend
        :param new_size: target size, either an int or a dict with
            "min"/"max" keys from which a random size is drawn
        :returns: the refreshed volume once it is "available" again
        """
        if isinstance(new_size, dict):
            # Randomized size support: {"min": x, "max": y}, inclusive.
            new_size = random.randint(new_size["min"], new_size["max"])
        aname = "cinder_v%s.extend_volume" % self.version
        with atomic.ActionTimer(self, aname):
            self._get_client().volumes.extend(volume, new_size)
            return self._wait_available_volume(volume)
    def list_snapshots(self, detailed=True):
        """Get a list of all snapshots.

        :param detailed: if True, list snapshots with details
        """
        aname = "cinder_v%s.list_snapshots" % self.version
        with atomic.ActionTimer(self, aname):
            return (self._get_client()
                    .volume_snapshots.list(detailed))
def set_metadata(self, volume, sets=10, set_size=3):
"""Set volume metadata.
:param volume: The volume to set metadata on
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version,
set_size,
sets)
with atomic.ActionTimer(self, key):
keys = []
for i in range(sets):
metadata = {}
for j in range(set_size):
key = self.generate_random_name()
keys.append(key)
metadata[key] = self.generate_random_name()
self._get_client().volumes.set_metadata(volume, metadata)
return keys
    def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
        """Delete volume metadata keys.

        Note that ``len(keys)`` must be greater than or equal to
        ``deletes * delete_size``.

        :param volume: The volume to delete metadata from
        :param deletes: how many operations to perform
        :param delete_size: number of metadata keys to delete in each operation
        :param keys: a list of keys to choose deletion candidates from
        """
        if len(keys) < deletes * delete_size:
            raise exceptions.InvalidArgumentsException(
                "Not enough metadata keys to delete: "
                "%(num_keys)s keys, but asked to delete %(num_deletes)s" %
                {"num_keys": len(keys),
                 "num_deletes": deletes * delete_size})
        # make a shallow copy of the list of keys so that, when we pop
        # from it later, we don't modify the original list.
        keys = list(keys)
        random.shuffle(keys)
        action_name = ("cinder_v%s.delete_%s_metadatas_%s_times"
                       % (self.version, delete_size, deletes))
        with atomic.ActionTimer(self, action_name):
            for i in range(deletes):
                # Consume the shuffled keys in fixed-size slices.
                to_del = keys[i * delete_size:(i + 1) * delete_size]
                self._get_client().volumes.delete_metadata(volume, to_del)
    def update_readonly_flag(self, volume, read_only):
        """Update the read-only access mode flag of the specified volume.

        :param volume: The UUID of the volume to update.
        :param read_only: The value to indicate whether to update volume to
            read-only access mode.
        :returns: A tuple of http Response and body
        """
        aname = "cinder_v%s.update_readonly_flag" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().volumes.update_readonly_flag(
                volume, read_only)
    def upload_volume_to_image(self, volume, force=False,
                               container_format="bare", disk_format="raw"):
        """Upload the given volume to image.

        Returns created image.

        :param volume: volume object
        :param force: flag to indicate whether to snapshot a volume even if
            it's attached to an instance
        :param container_format: container format of image. Acceptable
            formats: ami, ari, aki, bare, and ovf
        :param disk_format: disk format of image. Acceptable formats:
            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
        :returns: Returns created image object
        """
        aname = "cinder_v%s.upload_volume_to_image" % self.version
        with atomic.ActionTimer(self, aname):
            resp, img = self._get_client().volumes.upload_to_image(
                volume, force, self.generate_random_name(), container_format,
                disk_format)
            # NOTE (e0ne): upload_to_image changes volume status to uploading
            # so we need to wait until it will be available.
            volume = self._wait_available_volume(volume)
            # The new image id is buried in the upload response body.
            image_id = img["os-volume_upload_image"]["image_id"]
            glance = image.Image(self._clients)
            image_inst = glance.get_image(image_id)
            # Wait for glance to finish activating the image as well.
            image_inst = bench_utils.wait_for_status(
                image_inst,
                ready_statuses=["active"],
                update_resource=glance.get_image,
                timeout=CONF.openstack.glance_image_create_timeout,
                check_interval=(CONF.openstack
                                .glance_image_create_poll_interval)
            )
            return image_inst
    def create_qos(self, specs):
        """Create a qos specs.

        :param specs: A dict of key/value pairs to be set
        :rtype: :class:'QoSSpecs'
        """
        aname = "cinder_v%s.create_qos" % self.version
        # Name is auto-generated so cleanup can recognize the resource.
        name = self.generate_random_name()
        with atomic.ActionTimer(self, aname):
            return self._get_client().qos_specs.create(name, specs)
    def list_qos(self, search_opts=None):
        """Get a list of all qos specs.

        :param search_opts: search options
        :rtype: list of :class: 'QoSpecs'
        """
        aname = "cinder_v%s.list_qos" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().qos_specs.list(search_opts)
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
aname = "cinder_v%s.get_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.get(qos_id)
    def set_qos(self, qos_id, set_specs_args):
        """Add/Update keys in qos specs.

        :param qos_id: The ID of the :class:`QoSSpecs` to get
        :param set_specs_args: A dict of key/value pairs to be set
        :rtype: class 'cinderclient.apiclient.base.DictWithMeta'
            {"qos_specs": set_specs_args}
        """
        aname = "cinder_v%s.set_qos" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().qos_specs.set_keys(qos_id,
                                                         set_specs_args)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.qos_associate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.associate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
    def qos_disassociate_type(self, qos_specs, vol_type_id):
        """Disassociate qos specs from volume type.

        :param qos_specs: The qos specs to be disassociated with
        :param vol_type_id: The volume type id to be disassociated with
        :returns: base on client response return True if the request
            has been accepted or not
        """
        aname = "cinder_v%s.qos_disassociate_type" % self.version
        with atomic.ActionTimer(self, aname):
            # (response, body) tuple; HTTP 202 == accepted.
            tuple_res = self._get_client().qos_specs.disassociate(qos_specs,
                                                                  vol_type_id)
            return (tuple_res[0].status_code == 202)
    def delete_snapshot(self, snapshot):
        """Delete the given snapshot.

        Returns when the snapshot is actually deleted.

        :param snapshot: snapshot object
        """
        aname = "cinder_v%s.delete_snapshot" % self.version
        with atomic.ActionTimer(self, aname):
            self._get_client().volume_snapshots.delete(snapshot)
            bench_utils.wait_for_status(
                snapshot,
                ready_statuses=["deleted"],
                check_deletion=True,
                update_resource=self._update_resource,
                timeout=CONF.openstack.cinder_volume_delete_timeout,
                check_interval=(CONF.openstack
                                .cinder_volume_delete_poll_interval)
            )
    def delete_backup(self, backup):
        """Delete the given backup.

        Returns when the backup is actually deleted.

        :param backup: backup instance
        """
        aname = "cinder_v%s.delete_backup" % self.version
        with atomic.ActionTimer(self, aname):
            self._get_client().backups.delete(backup)
            bench_utils.wait_for_status(
                backup,
                ready_statuses=["deleted"],
                check_deletion=True,
                update_resource=self._update_resource,
                timeout=CONF.openstack.cinder_volume_delete_timeout,
                check_interval=(CONF.openstack
                                .cinder_volume_delete_poll_interval)
            )
    def restore_backup(self, backup_id, volume_id=None):
        """Restore the given backup.

        :param backup_id: The ID of the backup to restore.
        :param volume_id: The ID of the volume to restore the backup to.
        :returns: the restored volume, once it is "available"
        """
        aname = "cinder_v%s.restore_backup" % self.version
        with atomic.ActionTimer(self, aname):
            restore = self._get_client().restores.restore(backup_id, volume_id)
            # Re-fetch the target volume and wait for it to settle.
            restored_volume = self._get_client().volumes.get(restore.volume_id)
            return self._wait_available_volume(restored_volume)
def list_backups(self, detailed=True):
"""Return user volume backups list.
:param detailed: True if detailed information about backup
should be listed
"""
aname = "cinder_v%s.list_backups" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().backups.list(detailed)
    def list_transfers(self, detailed=True, search_opts=None):
        """Get a list of all volume transfers.

        :param detailed: If True, detailed information about transfer
            should be listed
        :param search_opts: Search options to filter out volume transfers
        :returns: list of :class:`VolumeTransfer`
        """
        aname = "cinder_v%s.list_transfers" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().transfers.list(detailed, search_opts)
    def get_volume_type(self, volume_type):
        """get details of volume_type.

        :param volume_type: The ID of the :class:`VolumeType` to get
        :returns: :class:`VolumeType`
        """
        aname = "cinder_v%s.get_volume_type" % self.version
        with atomic.ActionTimer(self, aname):
            return self._get_client().volume_types.get(volume_type)
    def delete_volume_type(self, volume_type):
        """delete a volume type.

        :param volume_type: Name or Id of the volume type
        :returns: base on client response return True if the request
            has been accepted or not
        """
        aname = "cinder_v%s.delete_volume_type" % self.version
        with atomic.ActionTimer(self, aname):
            # (response, body) tuple; HTTP 202 == accepted.
            tuple_res = self._get_client().volume_types.delete(
                volume_type)
            return tuple_res[0].status_code == 202
    def set_volume_type_keys(self, volume_type, metadata):
        """Set extra specs on a volume type.

        :param volume_type: The :class:`VolumeType` to set extra spec on
        :param metadata: A dict of key/value pairs to be set
        :returns: extra_specs if the request has been accepted
        """
        aname = "cinder_v%s.set_volume_type_keys" % self.version
        with atomic.ActionTimer(self, aname):
            # Note: set_keys is called on the volume_type object itself,
            # not on the client collection.
            return volume_type.set_keys(metadata)
def transfer_create(self, volume_id, name=None):
"""Create a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer
:rtype: VolumeTransfer
"""
name = name or self.generate_random_name()
aname = "cinder_v%s.transfer_create" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.create(volume_id, name=name)
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:rtype: VolumeTransfer
"""
aname = "cinder_v%s.transfer_accept" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.accept(transfer_id, auth_key)
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.create_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.create(
volume_type, specs)
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.get_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.get(
volume_type)
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
aname = "cinder_v%s.list_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.list(
search_opts)
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type
:param volume_type: the volume type whose encryption type information
must be deleted
"""
aname = "cinder_v%s.delete_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
resp = self._get_client().volume_encryption_types.delete(
volume_type)
if (resp[0].status_code != 202):
raise exceptions.RallyException(
"EncryptionType Deletion Failed")
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.update_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.update(
volume_type, specs)
class UnifiedCinderMixin(object):
    """Mixin adapting a version-specific cinder service to the unified API.

    Subclasses are expected to provide ``self._impl`` — a version-specific
    block-storage service. The static helpers below convert
    version-specific resource objects into the version-agnostic
    ``block.*`` representations returned to callers.
    """

    @staticmethod
    def _unify_backup(backup):
        """Convert a version-specific backup into ``block.VolumeBackup``."""
        return block.VolumeBackup(id=backup.id, name=backup.name,
                                  volume_id=backup.volume_id,
                                  status=backup.status)

    @staticmethod
    def _unify_transfer(transfer):
        """Convert a version-specific transfer to ``block.VolumeTransfer``."""
        return block.VolumeTransfer(
            id=transfer.id,
            name=transfer.name,
            volume_id=transfer.volume_id,
            # NOTE(andreykurilin): we need to access private field to avoid
            # calling extra GET request when the object is not fully
            # loaded.
            auth_key=transfer._info.get("auth_key"))

    @staticmethod
    def _unify_qos(qos):
        """Convert a version-specific qos spec into ``block.QoSSpecs``."""
        return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs)

    @staticmethod
    def _unify_encryption_type(encryption_type):
        """Convert a version-specific type to ``block.VolumeEncryptionType``."""
        return block.VolumeEncryptionType(
            id=encryption_type.encryption_id,
            volume_type_id=encryption_type.volume_type_id)

    def delete_volume(self, volume):
        """Delete a volume."""
        self._impl.delete_volume(volume)

    def set_metadata(self, volume, sets=10, set_size=3):
        """Update/Set a volume metadata.

        :param volume: The volume to update/set metadata on
        :param sets: how many operations to perform
        :param set_size: number of metadata keys to set in each operation
        :returns: A list of keys that were set
        """
        return self._impl.set_metadata(volume, sets=sets, set_size=set_size)

    def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
        """Delete volume metadata keys.

        Note that ``len(keys)`` must be greater than or equal to
        ``deletes * delete_size``.

        :param volume: The volume to delete metadata from
        :param keys: a list of keys to choose deletion candidates from
        :param deletes: how many operations to perform
        :param delete_size: number of metadata keys to delete in each operation
        """
        # Forward the caller-supplied values. Previously the literals
        # 10 and 3 were passed here, silently ignoring the ``deletes``
        # and ``delete_size`` arguments.
        self._impl.delete_metadata(volume, keys=keys, deletes=deletes,
                                   delete_size=delete_size)

    def update_readonly_flag(self, volume, read_only):
        """Update the read-only access mode flag of the specified volume.

        :param volume: The UUID of the volume to update.
        :param read_only: The value to indicate whether to update volume to
            read-only access mode.
        :returns: A tuple of http Response and body
        """
        return self._impl.update_readonly_flag(volume, read_only=read_only)

    def upload_volume_to_image(self, volume, force=False,
                               container_format="bare", disk_format="raw"):
        """Upload the given volume to image.

        Returns created image.

        :param volume: volume object
        :param force: flag to indicate whether to snapshot a volume even if
            it's attached to an instance
        :param container_format: container format of image. Acceptable
            formats: ami, ari, aki, bare, and ovf
        :param disk_format: disk format of image. Acceptable formats:
            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
        :returns: Returns created image object
        """
        return self._impl.upload_volume_to_image(
            volume, force=force, container_format=container_format,
            disk_format=disk_format)

    def create_qos(self, specs):
        """Create a qos specs.

        :param specs: A dict of key/value pairs to be set
        :rtype: :class:'QoSSpecs'
        """
        return self._unify_qos(self._impl.create_qos(specs))

    def list_qos(self, search_opts=None):
        """Get a list of all qos specs.

        :param search_opts: search options
        :rtype: list of :class: 'QoSpecs'
        """
        return [self._unify_qos(qos)
                for qos in self._impl.list_qos(search_opts)]

    def get_qos(self, qos_id):
        """Get a specific qos specs.

        :param qos_id: The ID of the :class: 'QoSSpecs' to get
        :rtype: :class: 'QoSSpecs'
        """
        return self._unify_qos(self._impl.get_qos(qos_id))

    def set_qos(self, qos, set_specs_args):
        """Add/Update keys in qos specs.

        :param qos: The instance of the :class:`QoSSpecs` to set
        :param set_specs_args: A dict of key/value pairs to be set
        :rtype: :class: 'QoSSpecs'
        """
        self._impl.set_qos(qos.id, set_specs_args)
        return self._unify_qos(qos)

    def qos_associate_type(self, qos_specs, vol_type_id):
        """Associate qos specs from volume type.

        :param qos_specs: The qos specs to be associated with
        :param vol_type_id: The volume type id to be associated with
        """
        self._impl.qos_associate_type(qos_specs, vol_type_id)
        return self._unify_qos(qos_specs)

    def qos_disassociate_type(self, qos_specs, vol_type_id):
        """Disassociate qos specs from volume type.

        :param qos_specs: The qos specs to be disassociated with
        :param vol_type_id: The volume type id to be disassociated with
        """
        self._impl.qos_disassociate_type(qos_specs, vol_type_id)
        return self._unify_qos(qos_specs)

    def delete_snapshot(self, snapshot):
        """Delete the given snapshot.

        Returns when the snapshot is actually deleted.

        :param snapshot: snapshot instance
        """
        # NOTE: the docstring previously described backups; this method
        # deletes a snapshot.
        self._impl.delete_snapshot(snapshot)

    def delete_backup(self, backup):
        """Delete a volume backup."""
        self._impl.delete_backup(backup)

    def list_backups(self, detailed=True):
        """Return user volume backups list."""
        return [self._unify_backup(backup)
                for backup in self._impl.list_backups(detailed=detailed)]

    def list_transfers(self, detailed=True, search_opts=None):
        """Get a list of all volume transfers.

        :param detailed: If True, detailed information about transfer
            should be listed
        :param search_opts: Search options to filter out volume transfers
        :returns: list of :class:`VolumeTransfer`
        """
        return [self._unify_transfer(transfer)
                for transfer in self._impl.list_transfers(
                    detailed=detailed, search_opts=search_opts)]

    def get_volume_type(self, volume_type):
        """get details of volume_type.

        :param volume_type: The ID of the :class:`VolumeType` to get
        :returns: :class:`VolumeType`
        """
        return self._impl.get_volume_type(volume_type)

    def delete_volume_type(self, volume_type):
        """delete a volume type.

        :param volume_type: Name or Id of the volume type
        :returns: base on client response return True if the request
            has been accepted or not
        """
        return self._impl.delete_volume_type(volume_type)

    def update_volume_type(self, volume_type, name=None,
                           description=None, is_public=None):
        """Update the name and/or description for a volume type.

        :param volume_type: The ID or an instance of the :class:`VolumeType`
            to update.
        :param name: if None, updates name by generating random name.
            else updates name with provided name
        :param description: Description of the volume type.
        :rtype: :class:`VolumeType`
        """
        return self._impl.update_volume_type(
            volume_type=volume_type, name=name, description=description,
            is_public=is_public
        )

    def add_type_access(self, volume_type, project):
        """Add a project to the given volume type access list.

        :param volume_type: Volume type name or ID to add access for the given
            project
        :project: Project ID to add volume type access for
        :return: An instance of cinderclient.apiclient.base.TupleWithMeta
        """
        return self._impl.add_type_access(
            volume_type=volume_type, project=project
        )

    def list_type_access(self, volume_type):
        """Print access information about the given volume type

        :param volume_type: Filter results by volume type name or ID
        :return: VolumeTypeAccess of specific project
        """
        return self._impl.list_type_access(volume_type)

    def set_volume_type_keys(self, volume_type, metadata):
        """Set extra specs on a volume type.

        :param volume_type: The :class:`VolumeType` to set extra spec on
        :param metadata: A dict of key/value pairs to be set
        :returns: extra_specs if the request has been accepted
        """
        return self._impl.set_volume_type_keys(volume_type, metadata)

    def transfer_create(self, volume_id, name=None):
        """Creates a volume transfer.

        :param name: The name of created transfer
        :param volume_id: The ID of the volume to transfer.
        :returns: Return the created transfer.
        """
        return self._unify_transfer(
            self._impl.transfer_create(volume_id, name=name))

    def transfer_accept(self, transfer_id, auth_key):
        """Accept a volume transfer.

        :param transfer_id: The ID of the transfer to accept.
        :param auth_key: The auth_key of the transfer.
        :returns: VolumeTransfer
        """
        return self._unify_transfer(
            self._impl.transfer_accept(transfer_id, auth_key=auth_key))

    def create_encryption_type(self, volume_type, specs):
        """Create encryption type for a volume type. Default: admin only.

        :param volume_type: the volume type on which to add an encryption type
        :param specs: the encryption type specifications to add
        :return: an instance of :class: VolumeEncryptionType
        """
        return self._unify_encryption_type(
            self._impl.create_encryption_type(volume_type, specs=specs))

    def get_encryption_type(self, volume_type):
        """Get the volume encryption type for the specified volume type.

        :param volume_type: the volume type to query
        :return: an instance of :class: VolumeEncryptionType
        """
        return self._unify_encryption_type(
            self._impl.get_encryption_type(volume_type))

    def list_encryption_type(self, search_opts=None):
        """List all volume encryption types.

        :param search_opts: Options used when search for encryption types
        :return: a list of :class: VolumeEncryptionType instances
        """
        return [self._unify_encryption_type(encryption_type)
                for encryption_type in self._impl.list_encryption_type(
                    search_opts=search_opts)]

    def delete_encryption_type(self, volume_type):
        """Delete the encryption type information for the specified volume type

        :param volume_type: the volume type whose encryption type information
            must be deleted
        """
        return self._impl.delete_encryption_type(volume_type)

    def update_encryption_type(self, volume_type, specs):
        """Update the encryption type information for the specified volume type

        :param volume_type: the volume type whose encryption type information
            must be updated
        :param specs: the encryption type specifications to update
        :return: an instance of :class: VolumeEncryptionType
        """
        return self._impl.update_encryption_type(volume_type, specs=specs)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,739
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/hooks/test_fault_injection.py
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import os_faults
from os_faults.api import error
from rally import consts
from rally.task import hook
from rally_openstack.task.hooks import fault_injection
from tests.unit import fakes
from tests.unit import test
@ddt.ddt
class FaultInjectionHookTestCase(test.TestCase):
    """Unit tests for the ``fault_injection`` hook action."""

    def setUp(self):
        super(FaultInjectionHookTestCase, self).setUp()
        # Minimal task payload: the hook only needs the deployment UUID.
        self.task = {"deployment_uuid": "foo_uuid"}

    @ddt.data((dict(action="foo"), True),
              (dict(action="foo", verify=True), True),
              (dict(action=10), False),
              (dict(action="foo", verify=10), False),
              (dict(), False))
    @ddt.unpack
    def test_config_schema(self, config, valid):
        # A valid config yields an empty result list; an invalid one
        # yields exactly one validation failure.
        results = hook.HookAction.validate("fault_injection", None, None,
                                           config)
        if valid:
            self.assertEqual([], results)
        else:
            self.assertEqual(1, len(results))

    @mock.patch("rally.common.objects.Deployment.get")
    @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
    def test_run(self, mock_timer, mock_deployment_get):
        # Deployment without extra cloud config -> connect(None) expected.
        mock_deployment_get.return_value = {"config": {}}
        # NOTE(review): this local name shadows the imported ``hook``
        # module within this method body.
        hook = fault_injection.FaultInjectionHook(
            self.task, {"action": "foo", "verify": True},
            {"iteration": 1})
        with mock.patch.object(os_faults, "human_api") as mock_human_api:
            with mock.patch.object(os_faults, "connect") as mock_connect:
                hook.run_sync()
                injector_inst = mock_connect.return_value
                mock_connect.assert_called_once_with(None)
                mock_human_api.assert_called_once_with(injector_inst, "foo")
        self.assertEqual(
            {"finished_at": fakes.FakeTimer().finish_timestamp(),
             "started_at": fakes.FakeTimer().timestamp(),
             "status": consts.HookStatus.SUCCESS,
             "triggered_by": {"iteration": 1}},
            hook.result())
        # verify=True must trigger the injector's own verification step.
        injector_inst.verify.assert_called_once_with()

    @mock.patch("rally.common.objects.Deployment.get")
    @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
    def test_run_extra_config(self, mock_timer, mock_deployment_get):
        # When the deployment carries a cloud_config, it is forwarded
        # to os_faults.connect() instead of None.
        mock_deployment_get.return_value = {
            "config": {"type": "ExistingCloud",
                       "extra": {"cloud_config": {"conf": "foo_config"}}}}
        hook = fault_injection.FaultInjectionHook(
            self.task, {"action": "foo"}, {"iteration": 1})
        with mock.patch.object(os_faults, "human_api") as mock_human_api:
            with mock.patch.object(os_faults, "connect") as mock_connect:
                hook.run_sync()
                injector_inst = mock_connect.return_value
                mock_connect.assert_called_once_with({"conf": "foo_config"})
                mock_human_api.assert_called_once_with(injector_inst, "foo")
        self.assertEqual(
            {"finished_at": fakes.FakeTimer().finish_timestamp(),
             "started_at": fakes.FakeTimer().timestamp(),
             "status": consts.HookStatus.SUCCESS,
             "triggered_by": {"iteration": 1}},
            hook.result())

    @mock.patch("rally.common.objects.Deployment.get")
    @mock.patch("os_faults.human_api")
    @mock.patch("os_faults.connect")
    @mock.patch("rally.common.utils.Timer", side_effect=fakes.FakeTimer)
    def test_run_error(self, mock_timer, mock_connect, mock_human_api,
                       mock_deployment_get):
        # A failing os-faults action must surface as HookStatus.FAILED
        # with the exception type and message recorded in the result.
        mock_deployment_get.return_value = {"config": {}}
        injector_inst = mock_connect.return_value
        mock_human_api.side_effect = error.OSFException("foo error")
        hook = fault_injection.FaultInjectionHook(
            self.task, {"action": "foo", "verify": True},
            {"iteration": 1})
        hook.run_sync()
        self.assertEqual(
            {"finished_at": fakes.FakeTimer().finish_timestamp(),
             "started_at": fakes.FakeTimer().timestamp(),
             "status": consts.HookStatus.FAILED,
             "error": {
                 "details": mock.ANY,
                 "etype": "OSFException",
                 "msg": "foo error"},
             "triggered_by": {"iteration": 1}},
            hook.result())
        mock_connect.assert_called_once_with(None)
        # Verification still runs before the failing action.
        injector_inst.verify.assert_called_once_with()
        mock_human_api.assert_called_once_with(injector_inst, "foo")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,740
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/cleanup/user.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from rally.common import validation
from rally_openstack.task.cleanup import manager
from rally_openstack.task import context
from rally_openstack.task.contexts.cleanup import base
from rally_openstack.task import scenario
@validation.add(name="check_cleanup_resources", admin_required=False)
# NOTE(amaretskiy): Set maximum order to run this last
@context.configure(name="cleanup", platform="openstack", order=sys.maxsize,
                   hidden=True)
class UserCleanup(base.CleanupMixin, context.OpenStackContext):
    """Context that removes resources created on behalf of regular users."""

    def cleanup(self):
        # Delegate to the generic cleanup manager; the names of the
        # resource types to wipe come directly from the context config.
        users = self.context.get("users", [])
        manager.cleanup(names=self.config,
                        admin_required=False,
                        users=users,
                        superclass=scenario.OpenStackScenario,
                        task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,741
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/zaqar/test_basic.py
|
# Copyright (c) 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.zaqar import basic
from tests.unit import test
BASE = "rally_openstack.task.scenarios.zaqar.basic"
class ZaqarBasicTestCase(test.ScenarioTestCase):
    """Unit tests for the basic Zaqar scenarios."""

    @mock.patch("%s.CreateQueue.generate_random_name" % BASE,
                return_value="fizbit")
    def test_create_queue(self, mock_random_name):
        # The scenario must create exactly one queue, forwarding any
        # extra keyword arguments to the queue-create helper.
        scenario = basic.CreateQueue(self.context)
        scenario._queue_create = mock.MagicMock()
        scenario.run(fakearg="fake")
        scenario._queue_create.assert_called_once_with(fakearg="fake")

    @mock.patch("%s.CreateQueue.generate_random_name" % BASE,
                return_value="kitkat")
    def test_producer_consumer(self, mock_random_name):
        scenario = basic.ProducerConsumer(self.context)
        # Expected payload: 20 messages with a fixed 360s TTL, since
        # min_msg_count == max_msg_count == 20 below.
        messages = [{"body": {"id": idx}, "ttl": 360} for idx
                    in range(20)]
        queue = mock.MagicMock()
        scenario._queue_create = mock.MagicMock(return_value=queue)
        scenario._messages_post = mock.MagicMock()
        scenario._messages_list = mock.MagicMock()
        scenario._queue_delete = mock.MagicMock()
        scenario.run(min_msg_count=20, max_msg_count=20, fakearg="fake")
        # Full lifecycle: create queue -> post -> list -> delete queue.
        scenario._queue_create.assert_called_once_with(fakearg="fake")
        scenario._messages_post.assert_called_once_with(queue, messages,
                                                        20, 20)
        scenario._messages_list.assert_called_once_with(queue)
        scenario._queue_delete.assert_called_once_with(queue)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,742
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/network/test_existing_network.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.contexts.network import existing_network
from tests.unit import test
CTX = "rally_openstack.task.contexts.network"
class ExistingNetworkTestCase(test.TestCase):
    """Unit tests for the ``existing_network`` context."""

    def setUp(self):
        super(ExistingNetworkTestCase, self).setUp()
        self.config = {"foo": "bar"}
        self.context = test.get_test_context()
        # Two users in two different tenants; each credential carries
        # the tenant name used to route to the right fake client below.
        self.context.update({
            "users": [
                {"id": 1,
                 "tenant_id": "tenant1",
                 "credential": mock.Mock(tenant_name="tenant_1")},
                {"id": 2,
                 "tenant_id": "tenant2",
                 "credential": mock.Mock(tenant_name="tenant_2")},
            ],
            "tenants": {
                "tenant1": {},
                "tenant2": {},
            },
            "config": {
                "existing_network": self.config
            },
        })

    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup(self, mock_clients):
        # setup() must discover each tenant's networks/subnets via that
        # tenant's own neutron client and store them in the context.
        clients = {
            # key is tenant_name
            "tenant_1": mock.MagicMock(),
            "tenant_2": mock.MagicMock()
        }
        mock_clients.side_effect = lambda cred: clients[cred.tenant_name]
        networks = {
            # key is tenant_id
            "tenant_1": [mock.Mock(), mock.Mock()],
            "tenant_2": [mock.Mock()]
        }
        subnets = {
            # key is tenant_id
            "tenant_1": [mock.Mock()],
            "tenant_2": [mock.Mock()]
        }
        neutron1 = clients["tenant_1"].neutron.return_value
        neutron2 = clients["tenant_2"].neutron.return_value
        neutron1.list_networks.return_value = {
            "networks": networks["tenant_1"]}
        neutron2.list_networks.return_value = {
            "networks": networks["tenant_2"]}
        neutron1.list_subnets.return_value = {"subnets": subnets["tenant_1"]}
        neutron2.list_subnets.return_value = {"subnets": subnets["tenant_2"]}
        context = existing_network.ExistingNetwork(self.context)
        context.setup()
        # One Clients() construction per user credential.
        mock_clients.assert_has_calls([
            mock.call(u["credential"]) for u in self.context["users"]])
        neutron1.list_networks.assert_called_once_with()
        neutron1.list_subnets.assert_called_once_with()
        neutron2.list_networks.assert_called_once_with()
        neutron2.list_subnets.assert_called_once_with()
        # Discovered networks/subnets end up keyed by tenant id.
        self.assertEqual(
            self.context["tenants"],
            {
                "tenant1": {"networks": networks["tenant_1"],
                            "subnets": subnets["tenant_1"]},
                "tenant2": {"networks": networks["tenant_2"],
                            "subnets": subnets["tenant_2"]},
            }
        )

    def test_cleanup(self):
        # NOTE(stpierre): Test that cleanup is not abstract
        existing_network.ExistingNetwork({"task": mock.MagicMock()}).cleanup()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,743
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/octavia/utils.py
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common.services.loadbalancer import octavia
from rally_openstack.task import scenario
class OctaviaBase(scenario.OpenStackScenario):
    """Base class for Octavia scenarios with basic atomic actions."""

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(OctaviaBase, self).__init__(context, admin_clients, clients)
        # Build an Octavia service wrapper for each client set that the
        # parent class exposed (admin and/or regular user clients).
        for clients_attr, service_attr in (("_admin_clients", "admin_octavia"),
                                           ("_clients", "octavia")):
            if hasattr(self, clients_attr):
                service = octavia.Octavia(
                    getattr(self, clients_attr),
                    name_generator=self.generate_random_name,
                    atomic_inst=self.atomic_actions())
                setattr(self, service_attr, service)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,744
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/authenticate/authenticate.py
|
# Copyright 2014 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally_openstack.common import consts
from rally.task import atomic
from rally.task import validation
from rally_openstack.task import scenario
"""Scenarios for Authentication mechanism."""
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.keystone", platform="openstack")
class Keystone(scenario.OpenStackScenario):

    @atomic.action_timer("authenticate.keystone")
    def run(self):
        """Check Keystone Client."""
        # Building the client is enough to exercise keystone auth here.
        self.clients("keystone")
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_glance", platform="openstack")
class ValidateGlance(scenario.OpenStackScenario):

    def run(self, repetitions):
        """Check Glance Client to ensure validation of token.

        Creation of the client does not ensure validation of the token.
        We have to do some minimal operation to make sure token gets
        validated. Here we list an intentionally non-existent image.

        :param repetitions: number of times to validate
        """
        client = self.clients("glance")
        missing_image = "__intentionally_non_existent_image___"
        with atomic.ActionTimer(self, "authenticate.validate_glance"):
            for _ in range(repetitions):
                list(client.images.list(name=missing_image))
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_nova", platform="openstack")
class ValidateNova(scenario.OpenStackScenario):

    def run(self, repetitions):
        """Check Nova Client to ensure validation of token.

        Creation of the client does not ensure validation of the token.
        We have to do some minimal operation (listing flavors) to make
        sure the token gets validated.

        :param repetitions: number of times to validate
        """
        client = self.clients("nova")
        with atomic.ActionTimer(self, "authenticate.validate_nova"):
            for _ in range(repetitions):
                client.flavors.list()
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_ceilometer",
                    platform="openstack")
class ValidateCeilometer(scenario.OpenStackScenario):

    def run(self, repetitions):
        """Check Ceilometer Client to ensure validation of token.

        Creation of the client does not ensure validation of the token.
        We have to do some minimal operation (listing meters) to make
        sure the token gets validated.

        :param repetitions: number of times to validate
        """
        client = self.clients("ceilometer")
        with atomic.ActionTimer(self, "authenticate.validate_ceilometer"):
            for _ in range(repetitions):
                client.meters.list()
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_cinder", platform="openstack")
class ValidateCinder(scenario.OpenStackScenario):

    def run(self, repetitions):
        """Check Cinder Client to ensure validation of token.

        Creation of the client does not ensure validation of the token.
        We have to do some minimal operation (listing volume types) to
        make sure the token gets validated.

        :param repetitions: number of times to validate
        """
        client = self.clients("cinder")
        with atomic.ActionTimer(self, "authenticate.validate_cinder"):
            for _ in range(repetitions):
                client.volume_types.list()
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_neutron", platform="openstack")
class ValidateNeutron(scenario.OpenStackScenario):

    def run(self, repetitions):
        """Check Neutron Client to ensure validation of token.

        Creation of the client does not ensure validation of the token.
        We have to do some minimal operation (listing networks) to make
        sure the token gets validated.

        :param repetitions: number of times to validate
        """
        client = self.clients("neutron")
        with atomic.ActionTimer(self, "authenticate.validate_neutron"):
            for _ in range(repetitions):
                client.list_networks()
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_octavia", platform="openstack")
class ValidateOctavia(scenario.OpenStackScenario):
    def run(self, repetitions):
        """Validate the user token against the Octavia service.

        Merely constructing the client does not validate the token, so a
        cheap list call is issued the requested number of times.

        :param repetitions: number of times to validate
        """
        client = self.clients("octavia")
        with atomic.ActionTimer(self, "authenticate.validate_octavia"):
            for _ in range(repetitions):
                client.load_balancer_list()
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="Authenticate.validate_heat", platform="openstack")
class ValidateHeat(scenario.OpenStackScenario):
    def run(self, repetitions):
        """Validate the user token against the Heat service.

        Merely constructing the client does not validate the token, so a
        cheap list call is issued the requested number of times. The
        stack listing is a generator, hence the list() to force the call.

        :param repetitions: number of times to validate
        """
        client = self.clients("heat")
        with atomic.ActionTimer(self, "authenticate.validate_heat"):
            for _ in range(repetitions):
                list(client.stacks.list(limit=0))
@validation.add("number", param_name="repetitions", minval=1)
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_services",
                services=[consts.Service.MONASCA])
@scenario.configure(name="Authenticate.validate_monasca", platform="openstack")
class ValidateMonasca(scenario.OpenStackScenario):
    def run(self, repetitions):
        """Validate the user token against the Monasca service.

        Merely constructing the client does not validate the token, so a
        cheap list call is issued the requested number of times.

        :param repetitions: number of times to validate
        """
        client = self.clients("monasca")
        with atomic.ActionTimer(self, "authenticate.validate_monasca"):
            for _ in range(repetitions):
                list(client.metrics.list(limit=0))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,745
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/functional/test_cli_env.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import unittest
from tests.functional import utils
# Credentials shaped like the OS_* shell variables; injected into the
# process environment for `env create --from-sysenv`.
TEST_ENV = {
    "OS_USERNAME": "admin",
    "OS_PASSWORD": "admin",
    "OS_TENANT_NAME": "admin",
    "OS_AUTH_URL": "http://fake/",
}
# Rally config overrides applied to tests that expect failures.
RALLY_OPTS = {
    # speed up failures
    "DEFAULT": {"openstack_client_http_timeout": 5}
}
class EnvTestCase(unittest.TestCase):
    """Functional tests for the `rally env` CLI commands."""

    # Path to a fake OS-client plugin used by the api_info tests.
    _PLUGIN_PATHS = "tests/functional/extra/fake_dir/fake_plugin.py"

    def _assert_check_auth_failure(self, rally, fake_spec):
        """Create an env from *fake_spec* and expect an auth failure.

        `env check` must fail, and its report must contain the standard
        "Failed to authenticate" line for the admin credentials.
        """
        spec = utils.JsonTempFile(fake_spec)
        rally("env create --name t_create_env --spec %s" % spec.filename)
        try:
            rally("env check")
        except utils.RallyCliError as e:
            line = ("| :-( | openstack | Failed to authenticate to "
                    "%s for user '%s' in project '%s': The request you have "
                    "made requires authentication. |" %
                    (fake_spec["existing@openstack"]["auth_url"],
                     fake_spec["existing@openstack"]["admin"]["username"],
                     fake_spec["existing@openstack"]["admin"]["project_name"]))
            self.assertIn(line, e.output.split("\n"))
        else:
            self.fail("Check env command should fail!")

    def _create_env_with_api_info(self, api_info):
        """Create an env whose spec carries *api_info*; return the Rally."""
        rally = utils.Rally()
        spec = copy.deepcopy(rally.env_spec)
        spec["existing@openstack"]["api_info"] = api_info
        spec = utils.JsonTempFile(spec)
        rally("env create --name t_create_env_with_api_info"
              " --spec %s" % spec.filename)
        return rally

    def test_check_success(self):
        """`env check` succeeds against a valid deployment."""
        rally = utils.Rally()
        rally("env check")

    def test_check_wrong_url(self):
        """`env check` reports a connection error for a bogus auth_url."""
        rally = utils.Rally(config_opts=RALLY_OPTS)
        fake_spec = copy.deepcopy(rally.env_spec)
        fake_spec["existing@openstack"]["auth_url"] = "http://example.com:5000"
        spec = utils.JsonTempFile(fake_spec)
        rally("env create --name t_create_env --spec %s" % spec.filename)
        try:
            rally("env check")
        except utils.RallyCliError as e:
            output = e.output.split("\n")
            line_template = "| :-( | openstack | %s |"
            err1 = "Unable to establish connection to http://example.com:5000"
            err2 = "Request to http://example.com:5000 timed out"
            if (line_template % err1 not in output
                    and line_template % err2 not in output):
                # BUG FIX: the original message had no %s placeholder, so
                # `"..." % e.output` raised TypeError instead of failing
                # with a helpful message.
                self.fail("The output of `env check` doesn't contain expected"
                          " error. Output:\n%s" % e.output)
        else:
            self.fail("Check env command should fail!")

    def test_check_wrong_username(self):
        """`env check` reports an auth error for a wrong username."""
        rally = utils.Rally(config_opts=RALLY_OPTS)
        fake_spec = copy.deepcopy(rally.env_spec)
        fake_spec["existing@openstack"]["admin"]["username"] = "MASTER777"
        self._assert_check_auth_failure(rally, fake_spec)

    def test_check_wrong_password(self):
        """`env check` reports an auth error for a wrong password."""
        rally = utils.Rally(config_opts=RALLY_OPTS)
        fake_spec = copy.deepcopy(rally.env_spec)
        fake_spec["existing@openstack"]["admin"]["password"] = "MASTER777"
        self._assert_check_auth_failure(rally, fake_spec)

    def test_create_from_sysenv(self):
        """`env create --from-sysenv` picks credentials up from OS_* vars."""
        rally = utils.Rally()
        rally.env.update(TEST_ENV)
        rally("env create --name t_create_env --from-sysenv")
        config = rally("env show --only-spec", getjson=True)
        self.assertIn("existing@openstack", config)
        self.assertEqual(TEST_ENV["OS_USERNAME"],
                         config["existing@openstack"]["admin"]["username"])
        self.assertEqual(TEST_ENV["OS_PASSWORD"],
                         config["existing@openstack"]["admin"]["password"])
        if "project_name" in config["existing@openstack"]["admin"]:
            # keystone v3 spells the field "project_name"...
            self.assertEqual(
                TEST_ENV["OS_TENANT_NAME"],
                config["existing@openstack"]["admin"]["project_name"])
        else:
            # ...while keystone v2 uses "tenant_name".
            self.assertEqual(
                TEST_ENV["OS_TENANT_NAME"],
                config["existing@openstack"]["admin"]["tenant_name"])
        self.assertEqual(
            TEST_ENV["OS_AUTH_URL"],
            config["existing@openstack"]["auth_url"])

    def test_check_api_info_success(self):
        """A well-formed api_info section passes `env check`."""
        rally = self._create_env_with_api_info(
            {"fakedummy": {"version": "2", "service_type": "dummyv2"}})
        rally("--plugin-paths %s env check" % self._PLUGIN_PATHS)

    def test_check_api_info_fail_1(self):
        """An unsupported client version is reported by `env check`."""
        rally = self._create_env_with_api_info(
            {"fakedummy": {"version": "3", "service_type": "dummyv2"}})
        try:
            rally("--plugin-paths %s env check" % self._PLUGIN_PATHS)
        except utils.RallyCliError as e:
            self.assertIn("Invalid setting for 'fakedummy':", e.output)
        else:
            # CONSISTENCY FIX: like the other failure tests in this class,
            # an unexpectedly successful `env check` must fail the test.
            self.fail("Check env command should fail!")

    def test_check_api_info_fail_2(self):
        """An unknown client name breaks the env health check."""
        rally = self._create_env_with_api_info(
            {"noneclient": {"version": "1", "service_type": "none"}})
        try:
            rally("--plugin-paths %s env check" % self._PLUGIN_PATHS)
        except utils.RallyCliError as e:
            self.assertIn(
                "Plugin existing@openstack.check_health() method is broken",
                e.output)
        else:
            self.fail("Check env command should fail!")

    def test_check_api_info_fail_3(self):
        """A client that fails to instantiate is reported by `env check`."""
        rally = self._create_env_with_api_info(
            {"faileddummy": {"version": "2", "service_type": "dummy"}})
        try:
            rally("--plugin-paths %s env check" % self._PLUGIN_PATHS)
        except utils.RallyCliError as e:
            self.assertIn("Can not create 'faileddummy' with 2 version",
                          e.output)
        else:
            self.fail("Check env command should fail!")

    def test_create_env_with_https_cert_https_key(self):
        """https_cert/https_key keys survive env creation and checks."""
        rally = utils.Rally()
        fake_spec = copy.deepcopy(rally.env_spec)
        fake_spec["existing@openstack"]["https_cert"] = ""
        fake_spec["existing@openstack"]["https_key"] = ""
        spec = utils.JsonTempFile(fake_spec)
        rally("env create --name t_create_env --spec %s" % spec.filename)
        config = rally("env show --only-spec", getjson=True)
        self.assertIn("https_cert", config["existing@openstack"].keys())
        self.assertIn("https_key", config["existing@openstack"].keys())
        rally("env check")
        rally("env info")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,746
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/mistral/utils.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.task import atomic
from rally.task import utils
import yaml
from rally_openstack.task import scenario
CONF = cfg.CONF
class MistralScenario(scenario.OpenStackScenario):
    """Base class for Mistral scenarios with basic atomic actions."""

    @atomic.action_timer("mistral.list_workbooks")
    def _list_workbooks(self):
        """Gets list of existing workbooks.

        :returns: workbook list
        """
        return self.clients("mistral").workbooks.list()

    @atomic.action_timer("mistral.create_workbook")
    def _create_workbook(self, definition, namespace=""):
        """Create a new workbook.

        The workbook name inside *definition* is replaced with a
        generated random name so that concurrent iterations do not clash.

        :param definition: workbook description in string
                           (yaml string) format
        :param namespace: the namespace where the workbook
                          will be created in
        :returns: workbook object
        """
        definition = yaml.safe_load(definition)
        definition["name"] = self.generate_random_name()
        definition = yaml.safe_dump(definition)
        return self.clients("mistral").workbooks.create(
            definition,
            namespace=namespace
        )

    @atomic.action_timer("mistral.delete_workbook")
    def _delete_workbook(self, wb_name, namespace=""):
        """Delete the given workbook.

        :param wb_name: the name of workbook that would be deleted.
        :param namespace: the namespace of workbook that would be deleted.
        """
        self.clients("mistral").workbooks.delete(
            wb_name,
            namespace=namespace
        )

    @atomic.action_timer("mistral.create_workflow")
    def _create_workflow(self, definition, namespace=""):
        """Create a workflow in the given namespace.

        :param definition: the definition of workflow
        :param namespace: the namespace of the workflow
        :returns: workflow object
        """
        return self.clients("mistral").workflows.create(
            definition,
            namespace=namespace
        )

    @atomic.action_timer("mistral.delete_workflow")
    def _delete_workflow(self, workflow_identifier, namespace=""):
        """Delete the given workflow.

        :param workflow_identifier: the identifier of workflow
        :param namespace: the namespace of the workflow
        """
        self.clients("mistral").workflows.delete(
            workflow_identifier,
            namespace=namespace
        )

    @atomic.action_timer("mistral.list_executions")
    def _list_executions(self, marker="", limit=None, sort_keys="",
                         sort_dirs=""):
        """Gets list of existing executions.

        :param marker: pagination marker passed through to the client
        :param limit: maximum number of executions to return
        :param sort_keys: comma-separated list of fields to sort by
        :param sort_dirs: comma-separated list of sort directions
        :returns: execution list
        """
        return self.clients("mistral").executions.list(
            marker=marker, limit=limit, sort_keys=sort_keys,
            sort_dirs=sort_dirs)

    @atomic.action_timer("mistral.create_execution")
    def _create_execution(self, workflow_identifier, wf_input=None,
                          namespace="", **params):
        """Create a new execution and wait until it finishes.

        Blocks until the execution reaches SUCCESS (raises on ERROR or
        on exceeding ``mistral_execution_timeout``).

        :param workflow_identifier: name or id of the workflow to execute
        :param wf_input: json string of mistral workflow input
        :param namespace: namespace of the workflow to execute
        :param params: optional mistral params (this is the place to pass
                       environment).
        :returns: executions object
        """
        execution = self.clients("mistral").executions.create(
            workflow_identifier,
            namespace=namespace,
            workflow_input=wf_input,
            **params
        )

        execution = utils.wait_for_status(
            execution, ready_statuses=["SUCCESS"], failure_statuses=["ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.mistral_execution_timeout)

        return execution

    @atomic.action_timer("mistral.delete_execution")
    def _delete_execution(self, execution):
        """Delete the given execution.

        :param execution: the execution that would be deleted.
        """
        self.clients("mistral").executions.delete(execution.id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,747
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/neutron/lbaas.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.wrappers import network as network_wrapper
from rally_openstack.task import context
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@context.configure(name="lbaas", platform="openstack", order=360)
class Lbaas(context.OpenStackContext):
    """Creates a lb-pool for every subnet created in network context."""

    # Context config: an optional "pool" dict forwarded verbatim to pool
    # creation, plus the LBaaS API version (only v1 is implemented).
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "pool": {
                "type": "object",
                "additionalProperties": True
            },
            "lbaas_version": {
                "type": "integer",
                "minimum": 1
            }
        },
        "additionalProperties": False
    }

    # Default: round-robin HTTP pools via LBaaS v1.
    DEFAULT_CONFIG = {
        "pool": {
            "lb_method": "ROUND_ROBIN",
            "protocol": "HTTP"
        },
        "lbaas_version": 1
    }

    def setup(self):
        """Create one lb-pool per subnet of each tenant network.

        Silently skips (with an info log) when the "lbaas" Neutron
        extension is not available. Raises NotImplementedError for any
        lbaas_version other than 1.
        """
        net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["credential"]),
            self, config=self.config)

        use_lb, msg = net_wrapper.supports_extension("lbaas")
        if not use_lb:
            LOG.info(msg)
            return

        # Creates a lb-pool for every subnet created in network context.
        for user, tenant_id in self._iterate_per_tenants():
            for network in self.context["tenants"][tenant_id]["networks"]:
                for subnet in network.get("subnets", []):
                    if self.config["lbaas_version"] == 1:
                        # Pools are recorded on the network dict so that
                        # cleanup() can find and delete them later.
                        network.setdefault("lb_pools", []).append(
                            net_wrapper.create_v1_pool(
                                tenant_id,
                                subnet,
                                **self.config["pool"]))
                    else:
                        raise NotImplementedError(
                            "Context for LBaaS version %s not implemented."
                            % self.config["lbaas_version"])

    def cleanup(self):
        """Delete every pool recorded on the tenants' networks.

        Individual deletion failures are logged and swallowed so cleanup
        of the remaining pools continues.
        """
        net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["credential"]),
            self, config=self.config)
        for tenant_id, tenant_ctx in self.context["tenants"].items():
            for network in tenant_ctx.get("networks", []):
                for pool in network.get("lb_pools", []):
                    with logging.ExceptionLogger(
                            LOG,
                            "Failed to delete pool %(pool)s for tenant "
                            "%(tenant)s" % {"pool": pool["pool"]["id"],
                                            "tenant": tenant_id}):
                        if self.config["lbaas_version"] == 1:
                            net_wrapper.delete_v1_pool(pool["pool"]["id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,748
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/murano/environments.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.murano import utils
"""Scenarios for Murano environments."""
@validation.add("required_services", services=[consts.Service.MURANO])
@scenario.configure(name="MuranoEnvironments.list_environments",
                    platform="openstack")
class ListEnvironments(utils.MuranoScenario):
    def run(self):
        """List the murano environments.

        The equivalent of `murano environment-list`: fetches every
        environment visible to the scenario user.
        """
        self._list_environments()
@validation.add("required_services", services=[consts.Service.MURANO])
@scenario.configure(context={"cleanup@openstack": ["murano.environments"]},
                    name="MuranoEnvironments.create_and_delete_environment",
                    platform="openstack")
class CreateAndDeleteEnvironment(utils.MuranoScenario):
    def run(self):
        """Create an environment and a session, then delete the env."""
        env = self._create_environment()
        self._create_session(env.id)
        self._delete_environment(env)
@validation.add("required_services", services=[consts.Service.MURANO])
# BUG FIX: ("murano_packages") is a plain string, not a tuple; the
# trailing comma makes it the intended 1-tuple of required contexts.
@validation.add("required_contexts", contexts=("murano_packages",))
@scenario.configure(context={"cleanup@openstack": ["murano"],
                             "roles@openstack": ["admin"]},
                    name="MuranoEnvironments.create_and_deploy_environment",
                    platform="openstack")
class CreateAndDeployEnvironment(utils.MuranoScenario):
    def run(self, packages_per_env=1):
        """Create environment, session and deploy environment.

        Create environment, create session, add app to environment
        packages_per_env times, send environment to deploy.

        :param packages_per_env: number of packages per environment
        """
        environment = self._create_environment()
        session = self._create_session(environment.id)
        # Only the first package uploaded by the murano_packages context
        # is used (possibly added several times).
        package = self.context["tenant"]["packages"][0]
        for _ in range(packages_per_env):
            self._create_service(environment, session,
                                 package.fully_qualified_name)
        self._deploy_environment(environment, session)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,749
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/vm/workloads/test_siege.py
|
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
from rally_openstack.task.scenarios.vm.workloads import siege
from tests.unit import test
# Import path of the module under test; used as the mock patch target.
PATH = "rally_openstack.task.scenarios.vm.workloads.siege"

# Captured stdout of a `siege` run; test_run expects the parser to pick
# out the "Transaction rate" and "Throughput" lines from it.
SIEGE_OUTPUT = """
Transactions: 522 hits
Availability: 100.00 %
Elapsed time: 3.69 secs
Data transferred: 1.06 MB
Response time: 0.10 secs
Transaction rate: 141.46 trans/sec
Throughput: 0.29 MB/sec
Concurrency: 14.71
Successful transactions: 522
Failed transactions: 0
Longest transaction: 0.26
Shortest transaction: 0.08
"""

# Example heat stack outputs; get_instances() should extract the node
# addresses from the "wp_nodes" entry only.
OUTPUT = [
    {"output_value": "curl", "descr": "", "output_key": "curl_cli"},
    {"output_value": "wp-net", "descr": "", "output_key": "net_name"},
    {"output_value": ["10.0.0.3", "172.16.0.159"],
     "description": "",
     "output_key": "gate_node"},
    {"output_value": {
        "1": {"wordpress-network": ["10.0.0.4"]},
        "0": {"wordpress-network": ["10.0.0.5"]}},
        "description": "No description given", "output_key": "wp_nodes"}]
class SiegeTestCase(test.TestCase):
    """Unit tests for the siege VM workload helper."""

    @mock.patch("%s.json.load" % PATH)
    def test_get_instances(self, mock_load):
        """get_instances() extracts node IPs from the wp_nodes output."""
        mock_load.return_value = OUTPUT
        instances = list(siege.get_instances())
        self.assertEqual(["10.0.0.4", "10.0.0.5"], instances)

    @mock.patch("%s.get_instances" % PATH)
    @mock.patch("%s.generate_urls_list" % PATH)
    @mock.patch("%s.subprocess.check_output" % PATH)
    def test_run(self, mock_check_output, mock_generate_urls_list,
                 mock_get_instances):
        """run() prints only the transaction-rate and throughput values."""
        mock_get_instances.return_value = [1, 2]
        mock_generate_urls_list.return_value = "urls"
        mock_check_output.return_value = SIEGE_OUTPUT
        mock_write = mock.MagicMock()
        mock_stdout = mock.MagicMock(write=mock_write)
        real_stdout = sys.stdout
        sys.stdout = mock_stdout
        try:
            siege.run()
        finally:
            # BUG FIX: restore stdout even if siege.run() raises, so a
            # failure here cannot leave stdout mocked for later tests.
            sys.stdout = real_stdout
        expected = [mock.call("Transaction rate:141.46\n"),
                    mock.call("Throughput:0.29\n")]
        self.assertEqual(expected, mock_write.mock_calls)

    @mock.patch("%s.tempfile.NamedTemporaryFile" % PATH)
    def test_generate_urls_list(self, mock_named_temporary_file):
        """generate_urls_list() returns the temp file's name."""
        mock_urls = mock.MagicMock()
        mock_named_temporary_file.return_value = mock_urls
        name = siege.generate_urls_list(["foo", "bar"])
        self.assertEqual(mock_urls.name, name)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,750
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/storage/cinder_v1.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import utils as rutils
from rally.task import atomic
from rally_openstack.common import service
from rally_openstack.common.services.storage import block
from rally_openstack.common.services.storage import cinder_common
CONF = block.CONF
@service.service("cinder", service_type="block-storage", version="1")
class CinderV1Service(service.Service, cinder_common.CinderMixin):
    """Atomic-timed wrapper around the python-cinderclient v1 API."""

    @atomic.action_timer("cinder_v1.create_volume")
    def create_volume(self, size, snapshot_id=None, source_volid=None,
                      display_name=None, display_description=None,
                      volume_type=None, user_id=None,
                      project_id=None, availability_zone=None,
                      metadata=None, imageRef=None):
        """Creates a volume and waits until it is available.

        :param size: Size of volume in GB; may also be a dict with "min"
                     and "max" keys, in which case a random size from
                     that range is used
        :param snapshot_id: ID of the snapshot
        :param source_volid: ID of a source volume to clone from
        :param display_name: Name of the volume (random name if omitted)
        :param display_description: Description of the volume
        :param volume_type: Type of volume
        :param user_id: User id derived from context
        :param project_id: Project id derived from context
        :param availability_zone: Availability Zone to use
        :param metadata: Optional metadata to set on volume creation
        :param imageRef: reference to an image stored in glance
        :returns: Return a new volume.
        """
        if isinstance(size, dict):
            size = random.randint(size["min"], size["max"])

        volume = self._get_client().volumes.create(
            size,
            display_name=(display_name or self.generate_random_name()),
            display_description=display_description,
            snapshot_id=snapshot_id,
            source_volid=source_volid,
            volume_type=volume_type,
            user_id=user_id,
            project_id=project_id,
            availability_zone=availability_zone,
            metadata=metadata,
            imageRef=imageRef
        )

        # NOTE(msdubov): It is reasonable to wait 5 secs before starting to
        #                check whether the volume is ready => less API calls.
        rutils.interruptable_sleep(
            CONF.openstack.cinder_volume_create_prepoll_delay)

        return self._wait_available_volume(volume)

    @atomic.action_timer("cinder_v1.update_volume")
    def update_volume(self, volume_id, display_name=None,
                      display_description=None):
        """Update the name or description for a volume.

        Only the fields that are not None are sent in the update request.

        :param volume_id: The updated volume id.
        :param display_name: The volume name.
        :param display_description: The volume description.
        :returns: The updated volume.
        """
        kwargs = {}
        if display_name is not None:
            kwargs["display_name"] = display_name
        if display_description is not None:
            kwargs["display_description"] = display_description
        updated_volume = self._get_client().volumes.update(
            volume_id, **kwargs)
        return updated_volume["volume"]

    @atomic.action_timer("cinder_v1.list_volumes")
    def list_volumes(self, detailed=True, search_opts=None, limit=None):
        """List all volumes.

        :param detailed: Whether to return detailed volume info.
        :param search_opts: Search options to filter out volumes.
        :param limit: Maximum number of volumes to return.
        :returns: list of volumes
        """
        return self._get_client().volumes.list(detailed=detailed,
                                               search_opts=search_opts,
                                               limit=limit)

    @atomic.action_timer("cinder_v1.list_types")
    def list_types(self, search_opts=None):
        """Lists all volume types.

        :param search_opts: Search options to filter out volume types.
        :returns: list of volume types
        """
        return (self._get_client()
                .volume_types.list(search_opts))

    @atomic.action_timer("cinder_v1.create_snapshot")
    def create_snapshot(self, volume_id, force=False,
                        display_name=None, display_description=None):
        """Create one snapshot.

        Returns when the snapshot is actually created and is in the "Available"
        state.

        :param volume_id: volume uuid for creating snapshot
        :param force: flag to indicate whether to snapshot a volume even if
                      it's attached to an instance
        :param display_name: Name of the snapshot (random name if omitted)
        :param display_description: Description of the snapshot
        :returns: Created snapshot object
        """
        kwargs = {"force": force,
                  "display_name": display_name or self.generate_random_name(),
                  "display_description": display_description}

        snapshot = self._get_client().volume_snapshots.create(volume_id,
                                                              **kwargs)
        rutils.interruptable_sleep(
            CONF.openstack.cinder_volume_create_prepoll_delay)
        snapshot = self._wait_available_volume(snapshot)
        return snapshot

    @atomic.action_timer("cinder_v1.create_backup")
    def create_backup(self, volume_id, container=None,
                      name=None, description=None):
        """Create a volume backup of the given volume.

        Waits until the backup becomes available before returning.

        :param volume_id: The ID of the volume to backup.
        :param container: The name of the backup service container.
        :param name: The name of the backup (random name if omitted).
        :param description: The description of the backup.
        :returns: Created backup object
        """
        kwargs = {"name": name or self.generate_random_name(),
                  "description": description,
                  "container": container}
        backup = self._get_client().backups.create(volume_id, **kwargs)
        return self._wait_available_volume(backup)

    @atomic.action_timer("cinder_v1.create_volume_type")
    def create_volume_type(self, name=None):
        """Create a volume type.

        :param name: Descriptive name of the volume type (random name if
                     omitted)
        :returns: Created volume type object
        """
        kwargs = {"name": name or self.generate_random_name()}
        return self._get_client().volume_types.create(**kwargs)
@service.compat_layer(CinderV1Service)
class UnifiedCinderV1Service(cinder_common.UnifiedCinderMixin,
                             block.BlockStorage):
    """Unified BlockStorage interface backed by the Cinder v1 API.

    Translates the unified argument names (``name``/``description``) into
    the v1-specific ones (``display_name``/``display_description``) and
    wraps raw v1 resources into unified ``block`` objects. Arguments that
    are not forwarded to the v1 implementation are accepted only for
    interface compatibility and marked (IGNORED) below.
    """

    @staticmethod
    def _unify_volume(volume):
        """Convert a v1 volume (dict or client object) to block.Volume."""
        # The v1 implementation may hand back either a plain dict or a
        # client resource object; both expose the v1 "display_name" field.
        if isinstance(volume, dict):
            return block.Volume(id=volume["id"], name=volume["display_name"],
                                size=volume["size"], status=volume["status"])
        else:
            return block.Volume(id=volume.id, name=volume.display_name,
                                size=volume.size, status=volume.status)

    @staticmethod
    def _unify_snapshot(snapshot):
        """Convert a v1 snapshot object to block.VolumeSnapshot."""
        # Map the v1 "display_name" onto the unified "name" attribute.
        return block.VolumeSnapshot(id=snapshot.id, name=snapshot.display_name,
                                    volume_id=snapshot.volume_id,
                                    status=snapshot.status)

    def create_volume(self, size, consistencygroup_id=None,
                      group_id=None, snapshot_id=None, source_volid=None,
                      name=None, description=None,
                      volume_type=None, user_id=None,
                      project_id=None, availability_zone=None,
                      metadata=None, imageRef=None, scheduler_hints=None,
                      backup_id=None):
        """Creates a volume.

        :param size: Size of volume in GB
        :param consistencygroup_id: ID of the consistencygroup (IGNORED)
        :param group_id: ID of the group (IGNORED)
        :param snapshot_id: ID of the snapshot
        :param name: Name of the volume
        :param description: Description of the volume
        :param volume_type: Type of volume
        :param user_id: User id derived from context
        :param project_id: Project id derived from context
        :param availability_zone: Availability Zone to use
        :param metadata: Optional metadata to set on volume creation
        :param imageRef: reference to an image stored in glance
        :param source_volid: ID of source volume to clone from
        :param scheduler_hints: (optional extension) arbitrary key-value pairs
                            specified by the client to help boot an instance
                            (IGNORED)
        :param backup_id: ID of the backup(IGNORED)
        :returns: Return a new volume.
        """
        # consistencygroup_id, group_id, scheduler_hints and backup_id are
        # not forwarded to the v1 implementation.
        return self._unify_volume(self._impl.create_volume(
            size, snapshot_id=snapshot_id, source_volid=source_volid,
            display_name=name,
            display_description=description,
            volume_type=volume_type, user_id=user_id,
            project_id=project_id, availability_zone=availability_zone,
            metadata=metadata, imageRef=imageRef))

    def list_volumes(self, detailed=True, search_opts=None, marker=None,
                     limit=None, sort=None):
        """Lists all volumes.

        :param detailed: Whether to return detailed volume info.
        :param search_opts: Search options to filter out volumes.
        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this volume id.(IGNORED)
        :param limit: Maximum number of volumes to return.
        :param sort: Sort information(IGNORED)
        :returns: Return volumes list.
        """
        return [self._unify_volume(volume)
                for volume in self._impl.list_volumes(detailed=detailed,
                                                      search_opts=search_opts,
                                                      limit=limit)]

    def get_volume(self, volume_id):
        """Get a volume.

        :param volume_id: The ID of the volume to get.
        :returns: Return the volume.
        """
        return self._unify_volume(self._impl.get_volume(volume_id))

    def extend_volume(self, volume, new_size):
        """Extend the size of the specified volume.

        :param volume: The volume to extend.
        :param new_size: The new size.
        :returns: The extended volume, as a unified block.Volume.
        """
        return self._unify_volume(
            self._impl.extend_volume(volume, new_size=new_size))

    def update_volume(self, volume_id,
                      name=None, description=None):
        """Update the name or description for a volume.

        :param volume_id: The updated volume id.
        :param name: The volume name.
        :param description: The volume description.
        :returns: The updated volume.
        """
        return self._unify_volume(self._impl.update_volume(
            volume_id, display_name=name,
            display_description=description))

    def list_types(self, search_opts=None, is_public=None):
        """Lists all volume types.

        :param search_opts: Search options to filter out volume types.
        :param is_public: Filter by public visibility (IGNORED)
        :returns: Return the list of volume types.
        """
        return self._impl.list_types(search_opts=search_opts)

    def create_snapshot(self, volume_id, force=False,
                        name=None, description=None, metadata=None):
        """Create one snapshot.

        Returns when the snapshot is actually created and is in the "Available"
        state.

        :param volume_id: volume uuid for creating snapshot
        :param force: If force is True, create a snapshot even if the volume is
                      attached to an instance. Default is False.
        :param name: Name of the snapshot
        :param description: Description of the snapshot
        :param metadata: Metadata of the snapshot (IGNORED)
        :returns: Created snapshot object
        """
        # metadata is not forwarded to the v1 implementation.
        return self._unify_snapshot(self._impl.create_snapshot(
            volume_id, force=force, display_name=name,
            display_description=description))

    def list_snapshots(self, detailed=True):
        """Get a list of all snapshots.

        :param detailed: Whether to return detailed snapshot info.
        :returns: List of unified block.VolumeSnapshot objects.
        """
        return [self._unify_snapshot(snapshot)
                for snapshot in self._impl.list_snapshots(detailed=detailed)]

    def create_backup(self, volume_id, container=None,
                      name=None, description=None,
                      incremental=False, force=False,
                      snapshot_id=None):
        """Creates a volume backup.

        :param volume_id: The ID of the volume to backup.
        :param container: The name of the backup service container.
        :param name: The name of the backup.
        :param description: The description of the backup.
        :param incremental: Incremental backup. (IGNORED)
        :param force: If True, allows an in-use volume to be backed up.
                      (IGNORED)
        :param snapshot_id: The ID of the snapshot to backup. (IGNORED)
        :returns: The created backup object.
        """
        # incremental, force and snapshot_id are not forwarded to the v1
        # implementation.
        return self._unify_backup(self._impl.create_backup(
            volume_id, container=container, name=name,
            description=description))

    def create_volume_type(self, name=None, description=None, is_public=True):
        """Creates a volume type.

        :param name: Descriptive name of the volume type
        :param description: Description of the volume type (IGNORED)
        :param is_public: Volume type visibility (IGNORED)
        :returns: Return the created volume type.
        """
        # Only the name is forwarded to the v1 implementation.
        return self._impl.create_volume_type(name=name)

    def update_volume_type(self, volume_type, name=None,
                           description=None, is_public=None):
        """Not supported by the Cinder v1 API."""
        raise NotImplementedError("Cinder V1 doesn't support this method.")

    def add_type_access(self, volume_type, project):
        """Not supported by the Cinder v1 API."""
        raise NotImplementedError("Cinder V1 doesn't support this method.")

    def list_type_access(self, volume_type):
        """Not supported by the Cinder v1 API."""
        raise NotImplementedError("Cinder V1 doesn't support this method.")

    def restore_backup(self, backup_id, volume_id=None):
        """Restore the given backup.

        :param backup_id: The ID of the backup to restore.
        :param volume_id: The ID of the volume to restore the backup to.
        :returns: Return the restored backup.
        """
        return self._unify_volume(self._impl.restore_backup(
            backup_id, volume_id=volume_id))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,751
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/functional/test_task_samples.py
|
# Copyright 2014: Mirantis Inc.
# Copyright 2014: Catalyst IT Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import re
import traceback
import unittest
from rally import api
from rally.cli import yamlutils as yaml
from rally.common import broker
from rally import plugins
import rally_openstack as rally_openstack_module
from rally_openstack.common import consts
from rally_openstack.common import credential
from tests.functional import utils
class TestTaskSamples(unittest.TestCase):
    """Render and validate every task sample shipped with the project."""

    # Number of concurrent consumer threads used by broker.run() below.
    NUMBER_OF_THREADS = 20

    def _skip(self, validation_output):
        """Help to decide do we want to skip this result or not.

        :param validation_output: string representation of the
            error that we want to check
        :return: True if we want to skip this error
            of task sample validation, otherwise False.
        """
        # Validation failures caused by missing services/extensions in the
        # test cloud are expected and should not fail the whole run.
        skip_lst = ["[Ss]ervice is not available",
                    "is not installed. To install it run",
                    "extension.* is not configured"]
        for check_str in skip_lst:
            if re.search(check_str, validation_output) is not None:
                return True
        return False

    @plugins.ensure_plugins_are_loaded
    def test_task_samples_are_valid(self):
        """Validate all task samples against a freshly built deployment.

        Builds a deployment that contains one pre-created user, then walks
        all sample files, renders their templates and runs task validation
        on each, collecting all failures and reporting them at once.
        """
        from rally_openstack.task.contexts.keystone import users
        rally = utils.Rally(force_new_db=True)
        # let's use pre-created users to make TestTaskSamples quicker
        rapi = api.API(config_file=rally.config_filename)
        deployment = rapi.deployment._get("MAIN")
        openstack_platform = deployment.env_obj.data["platforms"]["openstack"]
        admin_creds = credential.OpenStackCredential(
            permission=consts.EndpointPermission.ADMIN,
            **openstack_platform["platform_data"]["admin"])
        # Minimal context needed by the UserGenerator to create one user.
        ctx = {
            "env": {
                "platforms": {
                    "openstack": {
                        "admin": admin_creds.to_dict(),
                        "users": []}}},
            "task": {"uuid": self.__class__.__name__,
                     "deployment_uuid": deployment["uuid"]}}
        user_ctx = users.UserGenerator(ctx)
        user_ctx.setup()
        self.addCleanup(user_ctx.cleanup)
        # Re-register the deployment with the newly created user embedded
        # in its config, so samples that need a user can be validated.
        os_creds = deployment["config"]["openstack"]
        user = copy.copy(os_creds["admin"])
        user["username"] = ctx["users"][0]["credential"].username
        user["password"] = ctx["users"][0]["credential"].password
        if "project_name" in os_creds["admin"]:
            # it is Keystone
            user["project_name"] = ctx["users"][0]["credential"].tenant_name
        else:
            user["tenant_name"] = ctx["users"][0]["credential"].tenant_name
        os_creds["users"] = [user]
        rally("deployment destroy MAIN", write_report=False)
        deployment_cfg = os.path.join(rally.tmp_dir, "new_deployment.json")
        with open(deployment_cfg, "w") as f:
            f.write(json.dumps({"openstack": os_creds}))
        rally("deployment create --name MAIN --filename %s" % deployment_cfg,
              write_report=False)
        # store all failures and print them at once
        failed_samples = {}

        def publisher(queue):
            """List all samples and render task configs"""
            samples_path = os.path.join(
                os.path.dirname(rally_openstack_module.__file__), os.pardir,
                "samples", "tasks")
            for dirname, dirnames, filenames in os.walk(samples_path):
                # NOTE(rvasilets): Skip by suggest of boris-42 because in
                #                  future we don't want to maintain this dir
                if dirname.find("tempest-do-not-run-against-production") != -1:
                    continue
                for filename in filenames:
                    full_path = os.path.join(dirname, filename)
                    # NOTE(hughsaunders): Skip non config files
                    # (bug https://bugs.launchpad.net/rally/+bug/1314369)
                    if os.path.splitext(filename)[1] != ".json":
                        continue
                    with open(full_path) as task_file:
                        input_task = task_file.read()
                        rendered_task = rapi.task.render_template(
                            task_template=input_task)
                        queue.append((full_path, rendered_task))

        def consumer(_cache, sample):
            """Validate one sample"""
            full_path, rendered_task = sample
            task_config = yaml.safe_load(rendered_task)
            try:
                rapi.task.validate(deployment="MAIN",
                                   config=task_config)
            except Exception as e:
                # Only record failures that are not environment-related.
                if not self._skip(str(e)):
                    failed_samples[full_path] = traceback.format_exc()

        broker.run(publisher, consumer, self.NUMBER_OF_THREADS)
        if failed_samples:
            self.fail("Validation failed on the one or several samples. "
                      "See details below:\n%s" %
                      "".join(["\n======\n%s\n\n%s\n" % (k, v)
                               for k, v in failed_samples.items()]))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,752
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/neutron/loadbalancer_v1.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.neutron import utils
"""Scenarios for Neutron Loadbalancer v1."""
@validation.add("restricted_parameters", param_names="subnet_id",
                subdict="pool_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_list_pools",
                    platform="openstack")
class CreateAndListPools(utils.NeutronScenario):

    def run(self, pool_create_args=None):
        """Create a pool(v1) and then list pools(v1).

        Measures the "neutron lb-pool-list" command performance: one pool
        is created per tenant subnet, after which all pools are listed.

        :param pool_create_args: dict, POST /lb/pools request options
        """
        create_args = pool_create_args or {}
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        self._create_v1_pools(tenant_networks, **create_args)
        self._list_v1_pools()
@validation.add("restricted_parameters", param_names="subnet_id",
                subdict="pool_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_delete_pools",
                    platform="openstack")
class CreateAndDeletePools(utils.NeutronScenario):

    def run(self, pool_create_args=None):
        """Create pools(v1) and delete pools(v1).

        Measures the "neutron lb-pool-create" and "neutron lb-pool-delete"
        command performance: a pool is created for every tenant subnet and
        each created pool is then deleted.

        :param pool_create_args: dict, POST /lb/pools request options
        """
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        created_pools = self._create_v1_pools(tenant_networks,
                                              **(pool_create_args or {}))
        for created_pool in created_pools:
            self._delete_v1_pool(created_pool["pool"])
@validation.add("restricted_parameters", param_names="subnet_id",
                subdict="pool_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_update_pools",
                    platform="openstack")
class CreateAndUpdatePools(utils.NeutronScenario):

    def run(self, pool_update_args=None, pool_create_args=None):
        """Create pools(v1) and update pools(v1).

        Measures the "neutron lb-pool-create" and "neutron lb-pool-update"
        command performance: a pool is created for every tenant subnet and
        each created pool is then updated.

        :param pool_create_args: dict, POST /lb/pools request options
        :param pool_update_args: dict, POST /lb/pools update options
        """
        create_args = pool_create_args or {}
        update_args = pool_update_args or {}
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        for created_pool in self._create_v1_pools(tenant_networks,
                                                  **create_args):
            self._update_v1_pool(created_pool, **update_args)
@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
                subdict="vip_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_list_vips",
                    platform="openstack")
class CreateAndListVips(utils.NeutronScenario):

    def run(self, pool_create_args=None, vip_create_args=None):
        """Create a vip(v1) and then list vips(v1).

        Measures the "neutron lb-vip-create" and "neutron lb-vip-list"
        command performance: a vip is created for every pool created, then
        all vips are listed.

        :param vip_create_args: dict, POST /lb/vips request options
        :param pool_create_args: dict, POST /lb/pools request options
        """
        create_vip_args = vip_create_args or {}
        create_pool_args = pool_create_args or {}
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        for created_pool in self._create_v1_pools(tenant_networks,
                                                  **create_pool_args):
            self._create_v1_vip(created_pool, **create_vip_args)
        self._list_v1_vips()
@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
                subdict="vip_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_delete_vips",
                    platform="openstack")
class CreateAndDeleteVips(utils.NeutronScenario):

    def run(self, pool_create_args=None, vip_create_args=None):
        """Create a vip(v1) and then delete vips(v1).

        Measures the "neutron lb-vip-create" and "neutron lb-vip-delete"
        command performance: a vip is created for each pool and all of
        those vips are then deleted.

        :param pool_create_args: dict, POST /lb/pools request options
        :param vip_create_args: dict, POST /lb/vips request options
        """
        pool_args = pool_create_args or {}
        vip_args = vip_create_args or {}
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        pools = self._create_v1_pools(tenant_networks, **pool_args)
        created_vips = [self._create_v1_vip(p, **vip_args) for p in pools]
        for created_vip in created_vips:
            self._delete_v1_vip(created_vip["vip"])
@validation.add("restricted_parameters", param_names=["pool_id", "subnet_id"],
                subdict="vip_create_args")
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=("network"))
@scenario.configure(context={"cleanup@openstack": ["neutron"]},
                    name="NeutronLoadbalancerV1.create_and_update_vips",
                    platform="openstack")
class CreateAndUpdateVips(utils.NeutronScenario):

    def run(self, pool_create_args=None,
            vip_update_args=None, vip_create_args=None):
        """Create vips(v1) and update vips(v1).

        Measures the "neutron lb-vip-create" and "neutron lb-vip-update"
        command performance: a vip is created for each pool and all of
        those vips are then updated.

        :param pool_create_args: dict, POST /lb/pools request options
        :param vip_create_args: dict, POST /lb/vips request options
        :param vip_update_args: dict, POST /lb/vips update options
        """
        pool_args = pool_create_args or {}
        vip_args = vip_create_args or {}
        update_args = vip_update_args or {}
        tenant_networks = self.context.get("tenant", {}).get("networks", [])
        pools = self._create_v1_pools(tenant_networks, **pool_args)
        created_vips = [self._create_v1_vip(p, **vip_args) for p in pools]
        for created_vip in created_vips:
            self._update_v1_vip(created_vip, **update_args)
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["neutron"]},
    name="NeutronLoadbalancerV1.create_and_list_healthmonitors",
    platform="openstack")
class CreateAndListHealthmonitors(utils.NeutronScenario):

    def run(self, healthmonitor_create_args=None):
        """Create healthmonitors(v1) and list healthmonitors(v1).

        Measures the "neutron lb-healthmonitor-list" command performance:
        one healthmonitor is created, then all are listed.

        :param healthmonitor_create_args: dict, POST /lb/healthmonitors request
                                          options
        """
        self._create_v1_healthmonitor(**(healthmonitor_create_args or {}))
        self._list_v1_healthmonitors()
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["neutron"]},
    name="NeutronLoadbalancerV1.create_and_delete_healthmonitors",
    platform="openstack")
class CreateAndDeleteHealthmonitors(utils.NeutronScenario):

    def run(self, healthmonitor_create_args=None):
        """Create a healthmonitor(v1) and delete healthmonitors(v1).

        Measures the "neutron lb-healthmonitor-create" and "neutron
        lb-healthmonitor-delete" command performance: a healthmonitor is
        created and then deleted.

        :param healthmonitor_create_args: dict, POST /lb/healthmonitors request
                                          options
        """
        created_hm = self._create_v1_healthmonitor(
            **(healthmonitor_create_args or {}))
        self._delete_v1_healthmonitor(created_hm["health_monitor"])
@validation.add("required_neutron_extensions", extensions=["lbaas"])
@validation.add("required_services",
                services=[consts.Service.NEUTRON])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(
    context={"cleanup@openstack": ["neutron"]},
    name="NeutronLoadbalancerV1.create_and_update_healthmonitors",
    platform="openstack")
class CreateAndUpdateHealthmonitors(utils.NeutronScenario):

    def run(self, healthmonitor_create_args=None,
            healthmonitor_update_args=None):
        """Create a healthmonitor(v1) and update healthmonitors(v1).

        Measures the "neutron lb-healthmonitor-create" and "neutron
        lb-healthmonitor-update" command performance: a healthmonitor is
        created and then updated.

        :param healthmonitor_create_args: dict, POST /lb/healthmonitors request
                                          options
        :param healthmonitor_update_args: dict, POST /lb/healthmonitors update
                                          options
        """
        create_args = healthmonitor_create_args or {}
        # Default update: bump max_retries to a random value in [1, 9].
        update_args = healthmonitor_update_args or {
            "max_retries": random.choice(range(1, 10))}
        created_hm = self._create_v1_healthmonitor(**create_args)
        self._update_v1_healthmonitor(created_hm, **update_args)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,753
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/watcher/audit_templates.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.watcher import utils as watcher_utils
from rally_openstack.task import types
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="audit_templates", platform="openstack", order=550)
class AuditTemplateGenerator(context.OpenStackContext):
    """Creates Watcher audit templates for tenants."""

    # Context config: a list of goal/strategy parameter pairs ("params"),
    # how many templates to create and how to pick a pair for each one.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "audit_templates_per_admin": {"type": "integer", "minimum": 1},
            "fill_strategy": {"enum": ["round_robin", "random", None]},
            "params": {
                "type": "array",
                "minItems": 1,
                "uniqueItems": True,
                "items": {
                    "type": "object",
                    "properties": {
                        "goal": {
                            "type": "object",
                            "properties": {
                                "name": {
                                    "type": "string"
                                }
                            },
                            "additionalProperties": False
                        },
                        "strategy": {
                            "type": "object",
                            "properties": {
                                "name": {
                                    "type": "string"
                                }
                            },
                            "additionalProperties": False
                        },
                    },
                    "additionalProperties": False,
                },
            }
        },
        "additionalProperties": False,
        "required": ["params"]
    }

    DEFAULT_CONFIG = {
        "audit_templates_per_admin": 1,
        "fill_strategy": "round_robin"
    }

    def setup(self):
        """Create the configured audit templates as the admin user.

        Stores the UUIDs of the created templates in
        ``self.context["audit_templates"]``.
        """
        watcher_scenario = watcher_utils.WatcherScenario(
            {"admin": self.context["admin"], "task": self.context["task"],
             "owner_id": self.context["owner_id"],
             "config": {
                 "api_versions": self.context["config"].get(
                     "api_versions", [])}
             })
        self.context["audit_templates"] = []
        for i in range(self.config["audit_templates_per_admin"]):
            cfg_size = len(self.config["params"])
            if self.config["fill_strategy"] == "round_robin":
                audit_params = self.config["params"][i % cfg_size]
            elif self.config["fill_strategy"] == "random":
                audit_params = random.choice(self.config["params"])
            # NOTE(review): CONFIG_SCHEMA also allows fill_strategy to be
            # None, in which case audit_params is never assigned and the
            # next statement raises NameError -- confirm whether None
            # should be rejected by the schema or mapped to a default.
            goal_id = types.WatcherGoal(self.context).pre_process(
                resource_spec=audit_params["goal"], config={})
            strategy_id = types.WatcherStrategy(self.context).pre_process(
                resource_spec=audit_params["strategy"], config={})
            audit_template = watcher_scenario._create_audit_template(
                goal_id, strategy_id)
            self.context["audit_templates"].append(audit_template.uuid)

    def cleanup(self):
        """Delete the action plans and audit templates created in setup()."""
        resource_manager.cleanup(names=["watcher.action_plan",
                                        "watcher.audit_template"],
                                 admin=self.context.get("admin", []),
                                 superclass=watcher_utils.WatcherScenario,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.