text stringlengths 0 1.05M | meta dict |
|---|---|
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api.definitions import port
# Common definitions for maximum string field length
DHCP_OPT_NAME_MAX_LEN = 64
# Option names whose value may be blank: their key spec below uses
# 'type:string_or_none' while all other options require a non-empty value.
VALID_BLANK_EXTRA_DHCP_OPTS = ('router', 'classless-static-route')
DHCP_OPT_VALUE_MAX_LEN = 255

# Two alternative key specs for one extra_dhcp_opts entry; which one
# applies depends on whether opt_name permits a blank value.
EXTRA_DHCP_OPT_KEY_SPECS = [
    # key spec for opt_name in VALID_BLANK_EXTRA_DHCP_OPTS
    {'opt_name': {'type:values': VALID_BLANK_EXTRA_DHCP_OPTS,
                  'required': True},
     'opt_value': {'type:string_or_none':
                   DHCP_OPT_VALUE_MAX_LEN,
                   'required': True},
     'ip_version': {'convert_to': converters.convert_to_int,
                    'type:values': [4, 6],
                    'required': False}},
    # key spec if opt_name not in VALID_BLANK_EXTRA_DHCP_OPTS
    {'opt_name': {'type:not_empty_string': DHCP_OPT_NAME_MAX_LEN,
                  'required': True},
     'opt_value': {'type:not_empty_string_or_none':
                   DHCP_OPT_VALUE_MAX_LEN,
                   'required': True},
     'ip_version': {'convert_to': converters.convert_to_int,
                    'type:values': [4, 6],
                    'required': False}}
]

# Name of the attribute this extension adds to ports.
EXTRADHCPOPTS = 'extra_dhcp_opts'
DHCP_OPT_CLIENT_ID = "client-id"
# client-id option value as defined in RFC 4776
DHCP_OPT_CLIENT_ID_NUM = 61
# The alias of the extension.
ALIAS = 'extra_dhcp_opt'
# Whether or not this extension is simply signaling behavior to the user
# or it actively modifies the attribute map (mandatory).
IS_SHIM_EXTENSION = False
# Whether the extension is marking the adoption of standardattr model for
# legacy resources, or introducing new standardattr attributes. False or
# None if the standardattr model is adopted since the introduction of
# resource extension (mandatory).
# If this is True, the alias for the extension should be prefixed with
# 'standard-attr-'.
IS_STANDARD_ATTR_EXTENSION = False
# The name of the extension (mandatory).
NAME = 'Neutron Extra DHCP options'
# A prefix for API resources. An empty prefix means that the API is going
# to be exposed at the v2/ level as any other core resource (mandatory).
API_PREFIX = ''
# The description of the extension (mandatory).
DESCRIPTION = ("Extra options configuration for DHCP. "
               "For example PXE boot options to DHCP clients can "
               "be specified (e.g. tftp-server, server-ip-address, "
               "bootfile-name)")
# A timestamp of when the extension was introduced (mandatory).
UPDATED_TIMESTAMP = "2013-03-17T12:00:00-00:00"
# The specific resources and/or attributes for the extension (optional).
# In case of simple extensions, with single resource, the string constants
# RESOURCE_NAME and COLLECTION_NAME can be used, otherwise string literals
# can be used instead.

# The name of the resource introduced or being extended
# (in case it is defined by another extension, or it is
# a core resource).
RESOURCE_NAME = port.RESOURCE_NAME
# The plural for the resource introduced or being extended
# (in case it is defined by another extension, or it is a
# core resource).
COLLECTION_NAME = port.COLLECTION_NAME
# The resource attribute map for the extension. It is effectively the
# bulk of the API contract alongside ACTION_MAP (mandatory).
RESOURCE_ATTRIBUTE_MAP = {
    COLLECTION_NAME: {
        EXTRADHCPOPTS: {
            'allow_post': True,
            'allow_put': True,
            'is_visible': True,
            'default': None,
            'validate': {
                'type:list_of_any_key_specs_or_none': EXTRA_DHCP_OPT_KEY_SPECS
            }
        }
    }
}
# The subresource attribute map for the extension. It adds child resources
# to main extension's resource. The subresource map must have a parent and
# a parameters entry. If an extension does not need such a map, None can
# be specified (mandatory).
SUB_RESOURCE_ATTRIBUTE_MAP = {}
# The action map: it associates verbs with methods to be performed on
# the API resource (mandatory).
ACTION_MAP = {}
# The list of required extensions (mandatory).
REQUIRED_EXTENSIONS = []
# The list of optional extensions (mandatory).
OPTIONAL_EXTENSIONS = []
# Empty: this extension defines no custom action status mapping.
ACTION_STATUS = {}
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/api/definitions/extra_dhcp_opt.py",
"copies": "1",
"size": "4755",
"license": "apache-2.0",
"hash": 8831076805822953000,
"line_mean": 35.5769230769,
"line_max": 78,
"alpha_frac": 0.6790746583,
"autogenerated": false,
"ratio": 3.797923322683706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9976997980983706,
"avg_score": 0,
"num_lines": 130
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
from neutron.common import utils as n_utils
from neutron.extensions import portbindings
from neutron import manager
from neutron.objects import trunk as trunk_objects
from neutron.plugins.ml2 import driver_api as api
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
# This layer is introduced for keeping business logic and
# data persistence decoupled.
def trunk_can_be_managed(context, trunk):
    """Validate that the trunk can be managed.

    Raises TrunkDisabled when the trunk's admin_state_up flag is off;
    otherwise returns None.
    """
    if trunk.admin_state_up:
        return
    raise trunk_exc.TrunkDisabled(trunk_id=trunk.id)
def enforce_port_deletion_rules(resource, event, trigger, **kwargs):
    """Prohibit the deletion of a port that's used in a trunk.

    Raises PortInUseAsSubPort or PortInUseAsTrunkParent when the port is
    referenced by a trunk as a subport or as the parent port respectively.
    """
    # NOTE: the ML2 plugin properly catches these exceptions when raised, but
    # non-ML2 plugins might not. To address this we should move the callback
    # registry notification emitted in the ML2 plugin's delete_port() higher
    # up in the plugin hierarchy.
    context = kwargs['context']
    port_id = kwargs['port_id']
    subport = trunk_objects.SubPort.get_object(context, port_id=port_id)
    if subport:
        raise trunk_exc.PortInUseAsSubPort(port_id=port_id,
                                           trunk_id=subport.trunk_id)
    trunk = trunk_objects.Trunk.get_object(context, port_id=port_id)
    if trunk:
        raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id,
                                               trunk_id=trunk.id)
class TrunkPortValidator(object):
    """Checks whether a port may serve as a trunk parent or subport."""

    def __init__(self, port_id):
        self.port_id = port_id
        self._port = None

    def validate(self, context, parent_port=True):
        """Validate that the port can be used in a trunk.

        :param parent_port: True if the port is intended for use
                            as parent in a trunk.
        """
        # TODO(tidwellr): there is a chance of a race between the
        # time these checks are performed and the time the trunk
        # creation is executed. To be revisited, if it bites.

        # The port must not already be attached as a subport anywhere.
        if trunk_objects.SubPort.get_objects(context, port_id=self.port_id):
            raise trunk_exc.TrunkPortInUse(port_id=self.port_id)

        # Nor may it already be the parent port of another trunk.
        if trunk_objects.Trunk.get_objects(context, port_id=self.port_id):
            raise trunk_exc.ParentPortInUse(port_id=self.port_id)

        if not parent_port:
            # As a subport the port must not be actively used for other
            # purposes (router port, compute port, DHCP port etc.). We have
            # no clue what the side effects of connecting such a port to a
            # trunk would be, so err on the side of caution and prevent it.
            self.check_not_in_use(context)
            return self.port_id

        # As a parent the port may be bound to physical resources; whether
        # a bound port is trunkable depends on the underlying driver.
        if not self.can_be_trunked(context):
            raise trunk_exc.ParentPortInUse(port_id=self.port_id)
        return self.port_id

    def is_bound(self, context):
        """Return True if the port has a binding host, False otherwise."""
        plugin = manager.NeutronManager.get_plugin()
        self._port = plugin.get_port(context, self.port_id)
        return bool(self._port.get(portbindings.HOST_ID))

    def can_be_trunked(self, context):
        """Return True if a port can be trunked."""
        if not self.is_bound(context):
            # An unbound port can be trunked, always.
            return True
        trunk_plugin = manager.NeutronManager.get_service_plugins()['trunk']
        vif_type = self._port.get(portbindings.VIF_TYPE)
        binding_host = self._port.get(portbindings.HOST_ID)
        # Determine the driver that will be in charge of the trunk: this
        # can be determined based on the vif type, whether or not the
        # driver is agent-based, and whether the host is running the agent
        # associated to the driver itself.
        host_agent_types = utils.get_agent_types_by_host(context, binding_host)
        compatible = [
            candidate for candidate in trunk_plugin.registered_drivers
            if utils.is_driver_compatible(
                context, candidate, vif_type, host_agent_types)
        ]
        if not compatible:
            return False
        if len(compatible) > 1:
            raise trunk_exc.TrunkPluginDriverConflict()
        return compatible[0].can_trunk_bound_port

    def check_not_in_use(self, context):
        """Raises PortInUse for ports assigned for device purposes."""
        plugin = manager.NeutronManager.get_plugin()
        self._port = plugin.get_port(context, self.port_id)
        # NOTE(armax): the trunk extension itself does not make use of the
        # device_id field, because it has no reason to. If need be, this
        # check can be altered to accommodate the change in logic.
        device_id = self._port['device_id']
        if device_id:
            raise n_exc.PortInUse(net_id=self._port['network_id'],
                                  port_id=self._port['id'],
                                  device_id=device_id)
class SubPortsValidator(object):
    """Validates subport dicts before they are attached to a trunk."""

    def __init__(self, segmentation_types, subports, trunk_port_id=None):
        # segmentation_types maps a segmentation_type name to a callable
        # returning True when a segmentation id is within range.
        self._segmentation_types = segmentation_types
        self.subports = subports
        self.trunk_port_id = trunk_port_id

    def validate(self, context,
                 basic_validation=False, trunk_validation=True):
        """Validate that subports can be used in a trunk.

        :param basic_validation: when True, run the API-level subport
            validator first (for callers not screened by the API layer).
        :param trunk_validation: when True, run the full per-subport trunk
            checks; otherwise the subports are returned unchecked.
        """
        # Perform basic validation on subports, in case subports
        # are not automatically screened by the API layer.
        if basic_validation:
            msg = validators.validate_subports(self.subports)
            if msg:
                raise n_exc.InvalidInput(error_message=msg)
        trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id)
        if trunk_validation:
            return [self._validate(context, s, trunk_port_mtu)
                    for s in self.subports]
        else:
            return self.subports

    def _get_port_mtu(self, context, port_id):
        """Return the MTU of the network the given port belongs to.

        If the network or port cannot be obtained, or if MTU is not
        defined, returns None.
        """
        core_plugin = manager.NeutronManager.get_plugin()
        if not n_utils.is_extension_supported(core_plugin, 'net-mtu'):
            # net-mtu extension not supported: no MTU information available.
            return
        try:
            port = core_plugin.get_port(context, port_id)
            net = core_plugin.get_network(context, port['network_id'])
        except (n_exc.PortNotFound, n_exc.NetworkNotFound):
            # A concurrent request might have made the port or network
            # disappear; though during DB insertion, the subport request
            # will fail on integrity constraint, it is safer to return
            # a None MTU here.
            return
        return net[api.MTU]

    def _validate(self, context, subport, trunk_port_mtu):
        # Check that the subport doesn't reference the same port_id as a
        # trunk we may be in the middle of trying to create, in other words
        # make the validation idiot proof.
        if subport['port_id'] == self.trunk_port_id:
            raise trunk_exc.ParentPortInUse(port_id=subport['port_id'])

        # Check MTU sanity - subport MTU must not exceed trunk MTU.
        # If for whatever reason trunk_port_mtu is not available,
        # the MTU sanity check cannot be enforced.
        if trunk_port_mtu:
            port_mtu = self._get_port_mtu(context, subport['port_id'])
            if port_mtu and port_mtu > trunk_port_mtu:
                raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu(
                    port_id=subport['port_id'],
                    port_mtu=port_mtu,
                    trunk_id=self.trunk_port_id,
                    trunk_mtu=trunk_port_mtu
                )

        # If the segmentation details are missing, we will need to
        # figure out defaults when the time comes to support Ironic.
        # We can reasonably expect segmentation details to be provided
        # in all other cases for now.
        try:
            segmentation_type = subport["segmentation_type"]
            segmentation_id = (
                converters.convert_to_int(subport["segmentation_id"]))
        except KeyError:
            msg = _("Invalid subport details '%s': missing segmentation "
                    "information. Must specify both segmentation_id and "
                    "segmentation_type") % subport
            raise n_exc.InvalidInput(error_message=msg)
        except n_exc.InvalidInput:
            # convert_to_int raises InvalidInput for non-integer values.
            msg = _("Invalid subport details: segmentation_id '%s' is "
                    "not an integer") % subport["segmentation_id"]
            raise n_exc.InvalidInput(error_message=msg)
        if segmentation_type not in self._segmentation_types:
            msg = _("Unknown segmentation_type '%s'") % segmentation_type
            raise n_exc.InvalidInput(error_message=msg)
        # The per-type range checker decides whether the id is acceptable.
        if not self._segmentation_types[segmentation_type](segmentation_id):
            msg = _("Segmentation ID '%s' is not in range") % segmentation_id
            raise n_exc.InvalidInput(error_message=msg)

        # Check if the subport is already participating in an active trunk
        trunk_validator = TrunkPortValidator(subport['port_id'])
        trunk_validator.validate(context, parent_port=False)
        return subport
| {
"repo_name": "cloudbase/neutron",
"path": "neutron/services/trunk/rules.py",
"copies": "3",
"size": "10815",
"license": "apache-2.0",
"hash": -5459270957701423000,
"line_mean": 43.3237704918,
"line_max": 79,
"alpha_frac": 0.6314378178,
"autogenerated": false,
"ratio": 4.09503975766755,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.622647757546755,
"avg_score": null,
"num_lines": null
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import converters
from neutron_lib.db import constants as db_const
# String constants for the flavor resources and their API collections.
FLAVOR = 'flavor'
FLAVORS = FLAVOR + 's'
SERVICE_PROFILES = 'service_profiles'
NEXT_PROVIDERS = 'next_providers'

# The alias of the extension.
ALIAS = FLAVORS
# Whether this extension only signals behavior or modifies the attribute map.
IS_SHIM_EXTENSION = False
# Whether the extension marks adoption of the standardattr model.
IS_STANDARD_ATTR_EXTENSION = False
# The name of the extension.
NAME = 'Neutron Service Flavors'
# A prefix for API resources; empty means exposure at the v2/ level.
API_PREFIX = ''
# The description of the extension.
DESCRIPTION = 'Flavor specification for Neutron advanced services.'
# A timestamp of when the extension was introduced.
UPDATED_TIMESTAMP = '2015-09-17T10:00:00-00:00'

# The resource attribute map: the API contract for the top-level
# 'flavors' and 'service_profiles' resources.
RESOURCE_ATTRIBUTE_MAP = {
    FLAVORS: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True, 'is_filter': True,
               'is_sort_key': True, 'primary_key': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': db_const.NAME_FIELD_SIZE},
                 'is_filter': True, 'is_sort_key': True,
                 'is_visible': True, 'default': ''},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string_or_none':
                                     db_const.LONG_DESCRIPTION_FIELD_SIZE},
                        'is_filter': True, 'is_sort_key': True,
                        'is_visible': True, 'default': ''},
        'service_type': {'allow_post': True, 'allow_put': False,
                         'validate':
                             {'type:service_plugin_type': None},
                         'is_filter': True, 'is_sort_key': True,
                         'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'is_visible': True},
        'service_profiles': {'allow_post': True, 'allow_put': True,
                             'validate': {'type:uuid_list': None},
                             'is_visible': True, 'default': []},
        'enabled': {'allow_post': True, 'allow_put': True,
                    'convert_to': converters.convert_to_boolean_if_not_none,
                    'default': True, 'is_filter': True, 'is_sort_key': True,
                    'is_visible': True},
    },
    SERVICE_PROFILES: {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True, 'is_filter': True,
               'is_sort_key': True, 'primary_key': True},
        'description': {'allow_post': True, 'allow_put': True,
                        'validate': {'type:string_or_none':
                                     db_const.LONG_DESCRIPTION_FIELD_SIZE},
                        'is_filter': True, 'is_sort_key': True,
                        'is_visible': True, 'default': ''},
        'driver': {'allow_post': True, 'allow_put': True,
                   'validate': {'type:string':
                                db_const.LONG_DESCRIPTION_FIELD_SIZE},
                   'is_visible': True, 'is_filter': True,
                   'is_sort_key': True, 'default': ''},
        'metainfo': {'allow_post': True, 'allow_put': True,
                     'is_visible': True, 'is_sort_key': True,
                     'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'is_visible': True},
        'enabled': {'allow_post': True, 'allow_put': True,
                    'convert_to': converters.convert_to_boolean_if_not_none,
                    'is_filter': True, 'is_sort_key': True,
                    'is_visible': True, 'default': True},
    },
}

# The subresource attribute map: child resources nested under a flavor
# ('next_providers' and per-flavor 'service_profiles').
SUB_RESOURCE_ATTRIBUTE_MAP = {
    NEXT_PROVIDERS: {
        'parent': {'collection_name': FLAVORS,
                   'member_name': FLAVOR},
        'parameters': {'provider': {'allow_post': False,
                                    'allow_put': False,
                                    'is_visible': True},
                       'driver': {'allow_post': False,
                                  'allow_put': False,
                                  'is_visible': True},
                       'metainfo': {'allow_post': False,
                                    'allow_put': False,
                                    'is_visible': True},
                       'tenant_id': {'allow_post': True, 'allow_put': False,
                                     'required_by_policy': True,
                                     'validate': {
                                         'type:string':
                                             db_const.PROJECT_ID_FIELD_SIZE},
                                     'is_visible': True}}
    },
    SERVICE_PROFILES: {
        'parent': {'collection_name': FLAVORS,
                   'member_name': FLAVOR},
        'parameters': {'id': {'allow_post': True, 'allow_put': False,
                              'validate': {'type:uuid': None},
                              'is_visible': True},
                       'tenant_id': {'allow_post': True, 'allow_put': False,
                                     'required_by_policy': True,
                                     'validate': {
                                         'type:string':
                                             db_const.PROJECT_ID_FIELD_SIZE},
                                     'is_visible': True}}
    }
}

# No custom actions, required/optional extensions or action statuses.
ACTION_MAP = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
ACTION_STATUS = {}
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/api/definitions/flavors.py",
"copies": "1",
"size": "6221",
"license": "apache-2.0",
"hash": -774745707376817300,
"line_mean": 47.2248062016,
"line_max": 78,
"alpha_frac": 0.4647162836,
"autogenerated": false,
"ratio": 4.2090663058186735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 129
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import priority_group
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as lib_const
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.db import servicetype_db as st_db
from neutron.services import provider_configuration
from neutron.services import service_base
# Module-level logger (standard oslo.log pattern).
LOG = logging.getLogger(__name__)
@registry.has_registry_receivers
class DriverController(object):
    """Driver controller for the L3 service plugin.

    This component is responsible for dispatching router requests to L3
    service providers and for performing the bookkeeping about which
    driver is associated with a given router.

    This is not intended to be accessed by the drivers or the l3 plugin.
    All of the methods are marked as private to reflect this.
    """

    def __init__(self, l3_plugin):
        self.l3_plugin = l3_plugin
        self._stm = st_db.ServiceTypeManager.get_instance()
        self._stm.add_provider_configuration(
            plugin_constants.L3, _LegacyPlusProviderConfiguration())
        self._load_drivers()

    def _load_drivers(self):
        # Populates self.drivers (name -> driver) and the default provider
        # name from the service_base loader.
        self.drivers, self.default_provider = (
            service_base.load_drivers(plugin_constants.L3, self.l3_plugin))
        # store the provider name on each driver to make finding inverse easy
        for provider_name, driver in self.drivers.items():
            setattr(driver, 'name', provider_name)

    @property
    def _flavor_plugin(self):
        # Lazily resolved and cached reference to the flavors plugin.
        if not hasattr(self, '_flavor_plugin_ref'):
            self._flavor_plugin_ref = directory.get_plugin(
                plugin_constants.FLAVORS)
        return self._flavor_plugin_ref

    @registry.receives(resources.ROUTER, [events.BEFORE_CREATE],
                       priority_group.PRIORITY_ROUTER_CONTROLLER)
    def _check_router_request(self, resource, event, trigger, context,
                              router, **kwargs):
        """Validates that API request is sane (flags compat with flavor)."""
        drv = self._get_provider_for_create(context, router)
        _ensure_driver_supports_request(drv, router)

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_CREATE],
                       priority_group.PRIORITY_ROUTER_CONTROLLER)
    def _set_router_provider(self, resource, event, trigger, context, router,
                             router_db, **kwargs):
        """Associates a router with a service provider.

        Association is done by flavor_id if it's specified, otherwise it will
        fallback to determining which loaded driver supports the
        ha/distributed attributes associated with the router.
        """
        if _flavor_specified(router):
            router_db.flavor_id = router['flavor_id']
        drv = self._get_provider_for_create(context, router)
        self._stm.add_resource_association(context, plugin_constants.L3,
                                           drv.name, router['id'])
        registry.publish(
            resources.ROUTER_CONTROLLER, events.PRECOMMIT_ADD_ASSOCIATION,
            trigger, payload=events.DBEventPayload(
                context, request_body=router, states=(router_db,),
                metadata={'old_driver': None, 'new_driver': drv},
                resource_id=router_db.get('id')))

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE],
                       priority_group.PRIORITY_ROUTER_CONTROLLER)
    def _clear_router_provider(self, resource, event, trigger, context,
                               router_id, **kwargs):
        """Remove the association between a router and a service provider."""
        drv = self.get_provider_for_router(context, router_id)
        registry.publish(
            resources.ROUTER_CONTROLLER, events.PRECOMMIT_DELETE_ASSOCIATIONS,
            trigger, payload=events.DBEventPayload(
                context,
                metadata={'old_driver': drv, 'new_driver': None},
                resource_id=router_id))
        self._stm.del_resource_associations(context, [router_id])

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE],
                       priority_group.PRIORITY_ROUTER_CONTROLLER)
    def _update_router_provider(self, resource, event, trigger, payload=None):
        """Handle transition between providers.

        The provider can currently be changed only by the caller updating
        'ha' and/or 'distributed' attributes. If we allow updates of
        flavor_id directly in the future those requests will also land here.
        """
        drv = self.get_provider_for_router(payload.context,
                                           payload.resource_id)
        new_drv = None
        if _flavor_specified(payload.request_body):
            if (payload.request_body['flavor_id'] !=
                    payload.states[0]['flavor_id']):
                # TODO(kevinbenton): this is currently disallowed by the API
                # so we shouldn't hit it but this is a placeholder to add
                # support later.
                raise NotImplementedError()
        # the following is to support updating the 'ha' and 'distributed'
        # attributes via the API.
        try:
            _ensure_driver_supports_request(drv, payload.request_body)
        except lib_exc.InvalidInput:
            # the current driver does not support this request, we need to
            # migrate to a new provider. populate the distributed and ha
            # flags from the previous state if not in the update so we can
            # determine the target provider appropriately.
            # NOTE(kevinbenton): if the router is associated with a flavor
            # we bail because changing the provider without changing
            # the flavor will make things inconsistent. We can probably
            # update the flavor automatically in the future.
            if payload.states[0]['flavor_id']:
                raise lib_exc.InvalidInput(error_message=_(
                    "Changing the 'ha' and 'distributed' attributes on a "
                    "router associated with a flavor is not supported"))
            if 'distributed' not in payload.request_body:
                payload.request_body['distributed'] = (payload.states[0]
                                                       ['distributed'])
            if 'ha' not in payload.request_body:
                payload.request_body['ha'] = payload.states[0]['ha']
            LOG.debug("Get a provider driver handle based on the ha flag: "
                      "%(ha_flag)s and distributed flag: "
                      "%(distributed_flag)s",
                      {'ha_flag': payload.request_body['ha'],
                       'distributed_flag':
                           payload.request_body['distributed']})
            new_drv = self._attrs_to_driver(payload.request_body)
        if new_drv:
            LOG.debug("Router %(id)s migrating from %(old)s provider to "
                      "%(new)s provider.", {'id': payload.resource_id,
                                            'old': drv,
                                            'new': new_drv})
            _ensure_driver_supports_request(new_drv, payload.request_body)
            # TODO(kevinbenton): notify old driver explicitly of driver change
            # Both the delete and the add notifications happen inside one
            # writer transaction so the re-association is atomic.
            with db_api.CONTEXT_WRITER.using(payload.context):
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_DELETE_ASSOCIATIONS,
                    trigger, payload=payload)
                self._stm.del_resource_associations(
                    payload.context, [payload.resource_id])
                self._stm.add_resource_association(
                    payload.context, plugin_constants.L3,
                    new_drv.name, payload.resource_id, expire_session=False)
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_ADD_ASSOCIATION,
                    trigger, payload=payload)

    def get_provider_for_router(self, context, router_id):
        """Return the provider driver handle for a router id."""
        driver_name = self._stm.get_provider_names_by_resource_ids(
            context, [router_id]).get(router_id)
        if not driver_name:
            # this is an old router that hasn't been mapped to a provider
            # yet so we do this now
            router = self.l3_plugin.get_router(context, router_id)
            driver = self._attrs_to_driver(router)
            driver_name = driver.name
            with db_api.CONTEXT_WRITER.using(context):
                self._stm.add_resource_association(
                    context, plugin_constants.L3,
                    driver_name, router_id)
                registry.publish(
                    resources.ROUTER_CONTROLLER,
                    events.PRECOMMIT_ADD_ASSOCIATION,
                    self, payload=events.DBEventPayload(
                        context, states=(router,),
                        metadata={'old_driver': None, 'new_driver': driver},
                        resource_id=router_id))
        return self.drivers[driver_name]

    def _get_provider_for_create(self, context, router):
        """Get provider based on flavor or ha/distributed flags."""
        if not _flavor_specified(router):
            return self._attrs_to_driver(router)
        return self._get_l3_driver_by_flavor(context, router['flavor_id'])

    def _get_l3_driver_by_flavor(self, context, flavor_id):
        """Get a provider driver handle for a given flavor_id."""
        flavor = self._flavor_plugin.get_flavor(context, flavor_id)
        provider = self._flavor_plugin.get_flavor_next_provider(
            context, flavor['id'])[0]
        # TODO(kevinbenton): the callback framework suppresses the nice errors
        # these generate when they fail to lookup. carry them through
        driver = self.drivers[provider['provider']]
        return driver

    def _attrs_to_driver(self, router):
        """Get a provider driver handle based on the ha/distributed flags."""
        distributed = _is_distributed(
            router.get('distributed', lib_const.ATTR_NOT_SPECIFIED))
        ha = _is_ha(router.get('ha', lib_const.ATTR_NOT_SPECIFIED))
        drivers = list(self.drivers.values())
        # make sure default is tried before the rest if defined
        if self.default_provider:
            drivers.insert(0, self.drivers[self.default_provider])
        for driver in drivers:
            if _is_driver_compatible(distributed, ha, driver):
                return driver
        raise NotImplementedError(
            _("Could not find a service provider that supports "
              "distributed=%(d)s and ha=%(h)s") % {'d': distributed, 'h': ha}
        )

    def uses_scheduler(self, context, router_id):
        """Returns True if the integrated L3 scheduler should be used."""
        return (self.get_provider_for_router(context, router_id).
                use_integrated_agent_scheduler)
class _LegacyPlusProviderConfiguration(
        provider_configuration.ProviderConfiguration):
    """Provider configuration pre-seeded with the built-in L3 drivers."""

    def __init__(self):
        # loads up ha, dvr, and single_node service providers automatically.
        # If an operator has setup explicit values that conflict with these,
        # the operator defined values will take priority.
        super(_LegacyPlusProviderConfiguration, self).__init__(
            svc_type=plugin_constants.L3)
        builtin_drivers = (
            ('dvrha', 'dvrha.DvrHaDriver'),
            ('dvr', 'dvr.DvrDriver'),
            ('ha', 'ha.HaDriver'),
            ('single_node', 'single_node.SingleNodeDriver'),
        )
        path_template = 'neutron.services.l3_router.service_providers.%s'
        for name, driver in builtin_drivers:
            try:
                self.add_provider({'service_type': plugin_constants.L3,
                                   'name': name,
                                   'driver': path_template % driver,
                                   'default': False})
            except lib_exc.Invalid:
                LOG.debug("Could not add L3 provider '%s', it may have "
                          "already been explicitly defined.", name)
def _is_driver_compatible(distributed, ha, driver):
if not driver.distributed_support.is_compatible(distributed):
return False
if not driver.ha_support.is_compatible(ha):
return False
return True
def _is_distributed(distributed_attr):
if distributed_attr is False:
return False
if distributed_attr == lib_const.ATTR_NOT_SPECIFIED:
return cfg.CONF.router_distributed
return True
def _is_ha(ha_attr):
if ha_attr is False:
return False
if ha_attr == lib_const.ATTR_NOT_SPECIFIED:
return cfg.CONF.l3_ha
return True
def _flavor_specified(router):
return ('flavor_id' in router and
router['flavor_id'] != lib_const.ATTR_NOT_SPECIFIED)
def _ensure_driver_supports_request(drv, router_body):
r = router_body
for key, attr in (('distributed', 'distributed_support'),
('ha', 'ha_support')):
flag = r.get(key)
if flag not in [True, False]:
continue # not specified in body
if not getattr(drv, attr).is_compatible(flag):
raise lib_exc.InvalidInput(error_message=(
_("Provider %(name)s does not support %(key)s=%(flag)s")
% dict(name=drv.name, key=key, flag=flag)))
| {
"repo_name": "mahak/neutron",
"path": "neutron/services/l3_router/service_providers/driver_controller.py",
"copies": "2",
"size": "14323",
"license": "apache-2.0",
"hash": 6340038595787609000,
"line_mean": 45.8071895425,
"line_max": 79,
"alpha_frac": 0.6109055365,
"autogenerated": false,
"ratio": 4.389518847686178,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6000424384186178,
"avg_score": null,
"num_lines": null
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib._i18n import _
from neutron_lib import exceptions
class AgentNotFound(exceptions.NotFound):
    """Raised when no agent exists with the given id."""
    message = _("Agent %(id)s could not be found.")
class AgentNotFoundByTypeHost(exceptions.NotFound):
    """Raised when no agent matches the given agent_type/host pair."""
    message = _("Agent with agent_type=%(agent_type)s and host=%(host)s "
                "could not be found.")
class MultipleAgentFoundByTypeHost(exceptions.Conflict):
    """Raised when an agent_type/host pair unexpectedly matches >1 agent."""
    message = _("Multiple agents with agent_type=%(agent_type)s and "
                "host=%(host)s found.")
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/exceptions/agent.py",
"copies": "1",
"size": "1097",
"license": "apache-2.0",
"hash": 4682509973397057000,
"line_mean": 35.5666666667,
"line_max": 78,
"alpha_frac": 0.7046490428,
"autogenerated": false,
"ratio": 4.047970479704797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 30
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as lib_const
from neutron_lib import exceptions as lib_exc
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import servicetype_db as st_db
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class DriverController(object):
    """Driver controller for the L3 service plugin.

    This component is responsible for dispatching router requests to L3
    service providers and for performing the bookkeeping about which
    driver is associated with a given router.

    This is not intended to be accessed by the drivers or the l3 plugin.
    All of the methods are marked as private to reflect this.
    """

    def __init__(self, l3_plugin):
        self.l3_plugin = l3_plugin
        self._stm = st_db.ServiceTypeManager.get_instance()
        self._stm.add_provider_configuration(
            constants.L3_ROUTER_NAT, _LegacyPlusProviderConfiguration())
        self._load_drivers()
        # Keep the router<->provider association in sync with the
        # router lifecycle via precommit callbacks.
        registry.subscribe(self._set_router_provider,
                           resources.ROUTER, events.PRECOMMIT_CREATE)
        registry.subscribe(self._update_router_provider,
                           resources.ROUTER, events.PRECOMMIT_UPDATE)
        registry.subscribe(self._clear_router_provider,
                           resources.ROUTER, events.PRECOMMIT_DELETE)

    def _load_drivers(self):
        """Load the configured L3 service provider drivers."""
        self.drivers, self.default_provider = (
            service_base.load_drivers(constants.L3_ROUTER_NAT, self.l3_plugin))
        # store the provider name on each driver to make finding inverse easy
        for provider_name, driver in self.drivers.items():
            setattr(driver, 'name', provider_name)

    @property
    def _flavor_plugin(self):
        """Lazily resolved handle to the flavors service plugin."""
        if not hasattr(self, '_flavor_plugin_ref'):
            _service_plugins = manager.NeutronManager.get_service_plugins()
            self._flavor_plugin_ref = _service_plugins[constants.FLAVORS]
        return self._flavor_plugin_ref

    def _set_router_provider(self, resource, event, trigger, context, router,
                             router_db, **kwargs):
        """Associates a router with a service provider.

        Association is done by flavor_id if it's specified, otherwise it will
        fallback to determining which loaded driver supports the ha/distributed
        attributes associated with the router.
        """
        if _flavor_specified(router):
            router_db.flavor_id = router['flavor_id']
        drv = self._get_provider_for_create(context, router)
        _ensure_driver_supports_request(drv, router)
        # Use the shared constant rather than a duplicated string literal.
        self._stm.add_resource_association(context, constants.L3_ROUTER_NAT,
                                           drv.name, router['id'])

    def _clear_router_provider(self, resource, event, trigger, context,
                               router_id, **kwargs):
        """Remove the association between a router and a service provider."""
        self._stm.del_resource_associations(context, [router_id])

    def _update_router_provider(self, resource, event, trigger, context,
                                router_id, router, old_router, router_db,
                                **kwargs):
        """Handle transition between providers.

        The provider can currently be changed only by the caller updating
        'ha' and/or 'distributed' attributes. If we allow updates of flavor_id
        directly in the future those requests will also land here.
        """
        drv = self._get_provider_for_router(context, router_id)
        new_drv = None
        if _flavor_specified(router):
            if router['flavor_id'] != old_router['flavor_id']:
                # TODO(kevinbenton): this is currently disallowed by the API
                # so we shouldn't hit it but this is a placeholder to add
                # support later.
                raise NotImplementedError()
        # the following is to support updating the 'ha' and 'distributed'
        # attributes via the API.
        try:
            _ensure_driver_supports_request(drv, router)
        except lib_exc.InvalidInput:
            # the current driver does not support this request, we need to
            # migrate to a new provider. populate the distributed and ha
            # flags from the previous state if not in the update so we can
            # determine the target provider appropriately.
            # NOTE(kevinbenton): if the router is associated with a flavor
            # we bail because changing the provider without changing
            # the flavor will make things inconsistent. We can probably
            # update the flavor automatically in the future.
            if old_router['flavor_id']:
                raise lib_exc.InvalidInput(error_message=_(
                    "Changing the 'ha' and 'distributed' attributes on a "
                    "router associated with a flavor is not supported"))
            if 'distributed' not in router:
                router['distributed'] = old_router['distributed']
            if 'ha' not in router:
                # BUG FIX: this previously copied old_router['distributed'],
                # which could select the wrong target provider.
                router['ha'] = old_router['ha']
            new_drv = self._attrs_to_driver(router)
        if new_drv:
            LOG.debug("Router %(id)s migrating from %(old)s provider to "
                      "%(new)s provider.", {'id': router_id, 'old': drv,
                                            'new': new_drv})
            _ensure_driver_supports_request(new_drv, router)
            # TODO(kevinbenton): notify old driver explicity of driver change
            with context.session.begin(subtransactions=True):
                self._stm.del_resource_associations(context, [router_id])
                self._stm.add_resource_association(
                    context, constants.L3_ROUTER_NAT, new_drv.name, router_id)

    def _get_provider_for_router(self, context, router_id):
        """Return the provider driver handle for a router id."""
        driver_name = self._stm.get_provider_names_by_resource_ids(
            context, [router_id]).get(router_id)
        if not driver_name:
            # this is an old router that hasn't been mapped to a provider
            # yet so we do this now
            router = self.l3_plugin.get_router(context, router_id)
            driver = self._attrs_to_driver(router)
            driver_name = driver.name
            self._stm.add_resource_association(context,
                                               constants.L3_ROUTER_NAT,
                                               driver_name, router_id)
        return self.drivers[driver_name]

    def _get_provider_for_create(self, context, router):
        """Get provider based on flavor or ha/distributed flags."""
        if not _flavor_specified(router):
            return self._attrs_to_driver(router)
        return self._get_l3_driver_by_flavor(context, router['flavor_id'])

    def _get_l3_driver_by_flavor(self, context, flavor_id):
        """Get a provider driver handle for a given flavor_id."""
        flavor = self._flavor_plugin.get_flavor(context, flavor_id)
        provider = self._flavor_plugin.get_flavor_next_provider(
            context, flavor['id'])[0]
        # TODO(kevinbenton): the callback framework suppresses the nice errors
        # these generate when they fail to lookup. carry them through
        driver = self.drivers[provider['provider']]
        return driver

    def _attrs_to_driver(self, router):
        """Get a provider driver handle based on the ha/distributed flags."""
        distributed = _is_distributed(router['distributed'])
        ha = _is_ha(router['ha'])
        # BUG FIX: dict.values() is a view on Python 3 and has no insert();
        # copy to a list so the default provider can be tried first.
        drivers = list(self.drivers.values())
        # make sure default is tried before the rest if defined
        if self.default_provider:
            drivers.insert(0, self.drivers[self.default_provider])
        for driver in drivers:
            if _is_driver_compatible(distributed, ha, driver):
                return driver
        raise NotImplementedError(
            _("Could not find a service provider that supports "
              "distributed=%(d)s and ha=%(h)s") % {'d': distributed, 'h': ha}
        )

    def uses_scheduler(self, context, router_id):
        """Returns True if the integrated L3 scheduler should be used."""
        return (self._get_provider_for_router(context, router_id).
                use_integrated_agent_scheduler)
class _LegacyPlusProviderConfiguration(
        provider_configuration.ProviderConfiguration):
    """Provider configuration pre-seeded with the built-in L3 drivers."""

    def __init__(self):
        # loads up ha, dvr, and single_node service providers automatically.
        # If an operator has setup explicit values that conflict with these,
        # the operator defined values will take priority.
        super(_LegacyPlusProviderConfiguration, self).__init__()
        builtin_providers = (
            ('dvrha', 'dvrha.DvrHaDriver'),
            ('dvr', 'dvr.DvrDriver'),
            ('ha', 'ha.HaDriver'),
            ('single_node', 'single_node.SingleNodeDriver'),
        )
        for provider_name, class_ref in builtin_providers:
            path = 'neutron.services.l3_router.service_providers.%s' % class_ref
            try:
                self.add_provider({'service_type': constants.L3_ROUTER_NAT,
                                   'name': provider_name, 'driver': path,
                                   'default': False})
            except lib_exc.Invalid:
                LOG.debug("Could not add L3 provider '%s', it may have "
                          "already been explicitly defined.", provider_name)
def _is_driver_compatible(distributed, ha, driver):
if not driver.distributed_support.is_compatible(distributed):
return False
if not driver.ha_support.is_compatible(ha):
return False
return True
def _is_distributed(distributed_attr):
    """Interpret the 'distributed' attribute of a router body as a boolean.

    Falls back to the configured router_distributed default when the
    attribute was omitted from the request.
    """
    if distributed_attr == lib_const.ATTR_NOT_SPECIFIED:
        return cfg.CONF.router_distributed
    return distributed_attr is not False
def _is_ha(ha_attr):
    """Interpret the 'ha' attribute of a router body as a boolean.

    Returns the configured l3_ha default when the attribute was not
    specified in the request; any other non-False value counts as True.
    """
    if ha_attr is False:
        return False
    if ha_attr == lib_const.ATTR_NOT_SPECIFIED:
        return cfg.CONF.l3_ha
    return True
def _flavor_specified(router):
    """Return True when the router body carries a concrete flavor_id."""
    return ('flavor_id' in router and
            router['flavor_id'] != lib_const.ATTR_NOT_SPECIFIED)
def _ensure_driver_supports_request(drv, router_body):
    """Raise InvalidInput if drv rejects an explicit ha/distributed flag.

    Only 'distributed' and 'ha' values explicitly set to True/False in the
    request body are validated; omitted attributes are skipped.
    """
    r = router_body
    for key, attr in (('distributed', 'distributed_support'),
                      ('ha', 'ha_support')):
        flag = r.get(key)
        if flag not in [True, False]:
            continue  # not specified in body
        if not getattr(drv, attr).is_compatible(flag):
            raise lib_exc.InvalidInput(error_message=(
                _("Provider %(name)s does not support %(key)s=%(flag)s")
                % dict(name=drv.name, key=key, flag=flag)))
| {
"repo_name": "igor-toga/local-snat",
"path": "neutron/services/l3_router/service_providers/driver_controller.py",
"copies": "1",
"size": "11526",
"license": "apache-2.0",
"hash": 8267877433914586000,
"line_mean": 44.3779527559,
"line_max": 79,
"alpha_frac": 0.6219850772,
"autogenerated": false,
"ratio": 4.354363430298451,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5476348507498451,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.network.v2 import _proxy
import tricircle.tests.network_sdk.v2.flow_classifier as _fc
import tricircle.tests.network_sdk.v2.port_chain as _pc
import tricircle.tests.network_sdk.v2.port_pair as _pp
import tricircle.tests.network_sdk.v2.port_pair_group as _ppg
import tricircle.tests.network_sdk.v2.trunk as _trunk
class Proxy(_proxy.Proxy):
    """Network proxy extended with trunk and SFC resources for tests."""

    # trunk
    def create_trunk(self, **attrs):
        """Create a trunk from attributes."""
        return self._create(_trunk.Trunk, **attrs)

    def delete_trunk(self, trunk, ignore_missing=True):
        """Delete a trunk; missing trunks are ignored by default."""
        self._delete(_trunk.Trunk, trunk, ignore_missing=ignore_missing)

    def update_trunk(self, trunk, **attrs):
        """Update a trunk with the given attributes."""
        return self._update(_trunk.Trunk, trunk, **attrs)

    def trunks(self, **query):
        """Return a generator of trunks matching the query."""
        return self._list(_trunk.Trunk, **query)

    def add_subports(self, trunk, subports=None):
        """Add subports to a trunk.

        :param subports: list of subport dicts; defaults to an empty list.
        """
        # NOTE: mutable default argument ([]) replaced with None to avoid
        # the shared-default pitfall; behavior is unchanged.
        subports = [] if subports is None else subports
        trunk = self._get_resource(_trunk.Trunk, trunk)
        body = {'sub_ports': subports}
        return trunk.add_subports(self, **body)

    def remove_subports(self, trunk, subports=None):
        """Remove subports from a trunk.

        :param subports: list of subport dicts; defaults to an empty list.
        """
        subports = [] if subports is None else subports
        trunk = self._get_resource(_trunk.Trunk, trunk)
        body = {'sub_ports': subports}
        return trunk.remove_subports(self, **body)

    # port pair
    def create_port_pair(self, **attrs):
        """Create a port pair from attributes."""
        return self._create(_pp.PortPair, **attrs)

    def delete_port_pair(self, pp, ignore_missing=True):
        """Delete a port pair; missing pairs are ignored by default."""
        self._delete(_pp.PortPair, pp, ignore_missing=ignore_missing)

    def update_port_pair(self, pp, **attrs):
        """Update a port pair with the given attributes."""
        return self._update(_pp.PortPair, pp, **attrs)

    def port_pairs(self, **query):
        """Return a generator of port pairs matching the query."""
        return self._list(_pp.PortPair, **query)

    # port pair group
    def create_port_pair_group(self, **attrs):
        """Create a port pair group from attributes."""
        return self._create(_ppg.PortPairGroup, **attrs)

    def delete_port_pair_group(self, ppg, ignore_missing=True):
        """Delete a port pair group; missing groups are ignored by default."""
        self._delete(_ppg.PortPairGroup, ppg, ignore_missing=ignore_missing)

    def update_port_pair_group(self, ppg, **attrs):
        """Update a port pair group with the given attributes."""
        return self._update(_ppg.PortPairGroup, ppg, **attrs)

    def port_pair_groups(self, **query):
        """Return a generator of port pair groups matching the query."""
        return self._list(_ppg.PortPairGroup, **query)

    # port chain
    def create_port_chain(self, **attrs):
        """Create a port chain from attributes."""
        return self._create(_pc.PortChain, **attrs)

    def delete_port_chain(self, pc, ignore_missing=True):
        """Delete a port chain; missing chains are ignored by default."""
        self._delete(_pc.PortChain, pc, ignore_missing=ignore_missing)

    def update_port_chain(self, pc, **attrs):
        """Update a port chain with the given attributes."""
        return self._update(_pc.PortChain, pc, **attrs)

    def port_chains(self, **query):
        """Return a generator of port chains matching the query."""
        return self._list(_pc.PortChain, **query)

    # flow classifier
    def create_flow_classifier(self, **attrs):
        """Create a flow classifier from attributes."""
        return self._create(_fc.FlowClassifier, **attrs)

    def delete_flow_classifier(self, fc, ignore_missing=True):
        """Delete a flow classifier; missing classifiers are ignored."""
        self._delete(_fc.FlowClassifier, fc, ignore_missing=ignore_missing)

    def update_flow_classifier(self, fc, **attrs):
        """Update a flow classifier with the given attributes."""
        return self._update(_fc.FlowClassifier, fc, **attrs)

    def flow_classifiers(self, **query):
        """Return a generator of flow classifiers matching the query."""
        return self._list(_fc.FlowClassifier, **query)
| {
"repo_name": "stackforge/tricircle",
"path": "tricircle/tests/network_sdk/v2/_proxy.py",
"copies": "1",
"size": "3572",
"license": "apache-2.0",
"hash": -6192819410576997000,
"line_mean": 35.4489795918,
"line_max": 78,
"alpha_frac": 0.6690929451,
"autogenerated": false,
"ratio": 3.5192118226600986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46883047677600986,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from os_brick.i18n import _
from os_brick.initiator.connectors import base
from os_brick import utils
class LocalConnector(base.BaseLinuxConnector):
    """Connector class to attach/detach File System backed volumes."""
    def __init__(self, root_helper, driver=None,
                 *args, **kwargs):
        super(LocalConnector, self).__init__(root_helper, driver=driver,
                                             *args, **kwargs)
    @staticmethod
    def get_connector_properties(root_helper, *args, **kwargs):
        """The Local connector properties.

        Local volumes need no host-side connection info, so this is empty.
        """
        return {}
    def get_volume_paths(self, connection_properties):
        # The caller supplies the device path directly; no discovery needed.
        path = connection_properties['device_path']
        return [path]
    def get_search_path(self):
        # There is no directory to search for local volumes.
        return None
    def get_all_available_volumes(self, connection_properties=None):
        # TODO(walter-boring): not sure what to return here.
        return []
    @utils.trace
    def connect_volume(self, connection_properties):
        """Connect to a volume.

        :param connection_properties: The dictionary that describes all of the
            target volume attributes. ``connection_properties`` must include:
            - ``device_path`` - path to the volume to be connected
        :type connection_properties: dict
        :returns: dict with ``type`` (always ``'local'``) and ``path``
        :raises ValueError: if ``device_path`` is missing
        """
        if 'device_path' not in connection_properties:
            msg = (_("Invalid connection_properties specified "
                     "no device_path attribute"))
            raise ValueError(msg)
        device_info = {'type': 'local',
                       'path': connection_properties['device_path']}
        return device_info
    @utils.trace
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Disconnect a volume from the local host.

        Nothing to tear down for local volumes, so this is a no-op.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        :param force: ignored; accepted for interface compatibility
        :param ignore_errors: ignored; accepted for interface compatibility
        """
        pass
    def extend_volume(self, connection_properties):
        # TODO(walter-boring): is this possible?
        raise NotImplementedError
| {
"repo_name": "openstack/os-brick",
"path": "os_brick/initiator/connectors/local.py",
"copies": "1",
"size": "2919",
"license": "apache-2.0",
"hash": 2294070311911569400,
"line_mean": 35.9493670886,
"line_max": 79,
"alpha_frac": 0.6358341898,
"autogenerated": false,
"ratio": 4.60410094637224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.573993513617224,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from rally.common.i18n import _
from rally.common import logging
from rally.common import utils as rutils
from rally import consts
from rally import osclients
from rally.plugins.openstack.wrappers import glance as glance_wrapper
from rally.task import context
from rally.task import utils
CONF = cfg.CONF
# Reuse the glance scenario deletion timeouts for this context's cleanup.
CONF.import_opt("glance_image_delete_timeout",
                "rally.plugins.openstack.scenarios.glance.utils",
                "benchmark")
CONF.import_opt("glance_image_delete_poll_interval",
                "rally.plugins.openstack.scenarios.glance.utils",
                "benchmark")
LOG = logging.getLogger(__name__)
@context.configure(name="images", order=410)
class ImageGenerator(context.Context):
    """Context class for adding images to each user for benchmarks."""
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "image_url": {
                "type": "string",
            },
            "image_type": {
                "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
                         "ari", "ami"],
            },
            "image_container": {
                "type": "string",
            },
            "image_name": {
                "type": "string",
            },
            "min_ram": {  # megabytes
                "type": "integer",
                "minimum": 0
            },
            "min_disk": {  # gigabytes
                "type": "integer",
                "minimum": 0
            },
            "images_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "image_args": {
                "type": "object",
                "additionalProperties": True
            }
        },
        "required": ["image_url", "image_type", "image_container",
                     "images_per_tenant"],
        "additionalProperties": False
    }
    @logging.log_task_wrapper(LOG.info, _("Enter context: `Images`"))
    def setup(self):
        """Create the configured images for every tenant.

        Stores the created image ids in self.context["tenants"][id]["images"]
        so cleanup() and scenarios can find them.
        """
        image_url = self.config["image_url"]
        image_type = self.config["image_type"]
        image_container = self.config["image_container"]
        images_per_tenant = self.config["images_per_tenant"]
        image_name = self.config.get("image_name")
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            current_images = []
            clients = osclients.Clients(
                user["credential"],
                api_info=self.context["config"].get("api_versions"))
            glance_wrap = glance_wrapper.wrap(clients.glance, self)
            kwargs = self.config.get("image_args", {})
            # "min_ram"/"min_disk" are deprecated aliases that are folded
            # into the generic image_args kwargs.
            if self.config.get("min_ram") is not None:
                LOG.warning("The 'min_ram' argument is deprecated; specify "
                            "arbitrary arguments with 'image_args' instead")
                kwargs["min_ram"] = self.config["min_ram"]
            if self.config.get("min_disk") is not None:
                LOG.warning("The 'min_disk' argument is deprecated; specify "
                            "arbitrary arguments with 'image_args' instead")
                kwargs["min_disk"] = self.config["min_disk"]
            for i in range(images_per_tenant):
                # A configured name gets a numeric suffix for all but the
                # first image; otherwise a random name is generated.
                if image_name and i > 0:
                    cur_name = image_name + str(i)
                elif image_name:
                    cur_name = image_name
                else:
                    cur_name = self.generate_random_name()
                image = glance_wrap.create_image(
                    image_container, image_url, image_type,
                    name=cur_name, **kwargs)
                current_images.append(image.id)
            self.context["tenants"][tenant_id]["images"] = current_images
    @logging.log_task_wrapper(LOG.info, _("Exit context: `Images`"))
    def cleanup(self):
        """Delete every image recorded during setup and wait for deletion."""
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            clients = osclients.Clients(
                user["credential"],
                api_info=self.context["config"].get("api_versions"))
            glance_wrap = glance_wrapper.wrap(clients.glance, self)
            for image in self.context["tenants"][tenant_id].get("images", []):
                clients.glance().images.delete(image)
                # Block until glance reports the image gone (or pending
                # delete) so later contexts don't race with the deletion.
                utils.wait_for_status(
                    clients.glance().images.get(image),
                    ["deleted", "pending_delete"],
                    check_deletion=True,
                    update_resource=glance_wrap.get_image,
                    timeout=CONF.benchmark.glance_image_delete_timeout,
                    check_interval=CONF.benchmark.
                    glance_image_delete_poll_interval)
| {
"repo_name": "gluke77/rally",
"path": "rally/plugins/openstack/context/glance/images.py",
"copies": "5",
"size": "5358",
"license": "apache-2.0",
"hash": 1852387126220089900,
"line_mean": 37.8260869565,
"line_max": 78,
"alpha_frac": 0.5462859276,
"autogenerated": false,
"ratio": 4.252380952380952,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7298666879980952,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from rally.task import service
CONF = cfg.CONF
# Lightweight unified resource representations returned by the BlockStorage
# service methods; each exposes only the listed properties.
Volume = service.make_resource_cls(
    "Volume", properties=["id", "name", "size", "status"])
VolumeSnapshot = service.make_resource_cls(
    "VolumeSnapshot", properties=["id", "name", "volume_id", "status"])
VolumeBackup = service.make_resource_cls(
    "VolumeBackup", properties=["id", "name", "volume_id", "status"])
VolumeTransfer = service.make_resource_cls(
    "VolumeTransfer", properties=["id", "name", "volume_id", "auth_key"])
VolumeEncryptionType = service.make_resource_cls(
    "VolumeEncryptionType", properties=["id", "volume_type_id"])
QoSSpecs = service.make_resource_cls(
    "QoSSpecs", properties=["id", "name", "specs"])
class BlockStorage(service.UnifiedService):
@service.should_be_overridden
def create_volume(self, size, consistencygroup_id=None,
group_id=None, snapshot_id=None, source_volid=None,
name=None, description=None,
volume_type=None, user_id=None,
project_id=None, availability_zone=None,
metadata=None, imageRef=None, scheduler_hints=None,
source_replica=None, multiattach=False):
"""Creates a volume.
:param size: Size of volume in GB
:param consistencygroup_id: ID of the consistencygroup
:param group_id: ID of the group
:param snapshot_id: ID of the snapshot
:param name: Name of the volume
:param description: Description of the volume
:param volume_type: Type of volume
:param user_id: User id derived from context
:param project_id: Project id derived from context
:param availability_zone: Availability Zone to use
:param metadata: Optional metadata to set on volume creation
:param imageRef: reference to an image stored in glance
:param source_volid: ID of source volume to clone from
:param source_replica: ID of source volume to clone replica
:param scheduler_hints: (optional extension) arbitrary key-value pairs
specified by the client to help boot an instance
:param multiattach: Allow the volume to be attached to more than
one instance
:returns: Return a new volume.
"""
return self._impl.create_volume(
size, consistencygroup_id=consistencygroup_id, group_id=group_id,
snapshot_id=snapshot_id, source_volid=source_volid,
name=name, description=description, volume_type=volume_type,
user_id=user_id, project_id=project_id,
availability_zone=availability_zone, metadata=metadata,
imageRef=imageRef, scheduler_hints=scheduler_hints,
source_replica=source_replica, multiattach=multiattach)
@service.should_be_overridden
def list_volumes(self, detailed=True):
"""Lists all volumes.
:param detailed: Whether to return detailed volume info.
:returns: Return volumes list.
"""
return self._impl.list_volumes(detailed=detailed)
@service.should_be_overridden
def get_volume(self, volume_id):
"""Get a volume.
:param volume_id: The ID of the volume to get.
:returns: Return the volume.
"""
return self._impl.get_volume(volume_id)
@service.should_be_overridden
def update_volume(self, volume_id,
name=None, description=None):
"""Update the name or description for a volume.
:param volume_id: The updated volume id.
:param name: The volume name.
:param description: The volume description.
:returns: The updated volume.
"""
return self._impl.update_volume(
volume_id, name=name, description=description)
@service.should_be_overridden
def delete_volume(self, volume):
"""Delete a volume."""
self._impl.delete_volume(volume)
@service.should_be_overridden
def extend_volume(self, volume, new_size):
"""Extend the size of the specified volume."""
return self._impl.extend_volume(volume, new_size=new_size)
@service.should_be_overridden
def list_snapshots(self, detailed=True):
"""Get a list of all snapshots."""
return self._impl.list_snapshots(detailed=detailed)
@service.should_be_overridden
def list_types(self, search_opts=None, is_public=None):
"""Lists all volume types."""
return self._impl.list_types(search_opts=search_opts,
is_public=is_public)
@service.should_be_overridden
def set_metadata(self, volume, sets=10, set_size=3):
"""Update/Set a volume metadata.
:param volume: The updated/setted volume.
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
return self._impl.set_metadata(volume, sets=sets, set_size=set_size)
@service.should_be_overridden
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
self._impl.delete_metadata(volume, keys, deletes=deletes,
delete_size=delete_size)
@service.should_be_overridden
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
return self._impl.update_readonly_flag(volume, read_only=read_only)
@service.should_be_overridden
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
return self._impl.upload_volume_to_image(
volume, force=force, container_format=container_format,
disk_format=disk_format)
@service.should_be_overridden
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
return self._impl.create_qos(specs)
@service.should_be_overridden
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
return self._impl.list_qos(search_opts)
@service.should_be_overridden
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class:`QoSSpecs` to get.
:rtype: :class:`QoSSpecs`
"""
return self._impl.get_qos(qos_id)
@service.should_be_overridden
def set_qos(self, qos, set_specs_args):
"""Add/Update keys in qos specs.
:param qos: The instance of the :class:`QoSSpecs` to set
:param set_specs_args: A dict of key/value pairs to be set
:rtype: :class:`QoSSpecs`
"""
return self._impl.set_qos(qos=qos,
set_specs_args=set_specs_args)
@service.should_be_overridden
def qos_associate_type(self, qos_specs, volume_type):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param volume_type: The volume type id to be associated with
:rtype: :class:`QoSSpecs`
"""
return self._impl.qos_associate_type(qos_specs, volume_type)
@service.should_be_overridden
def qos_disassociate_type(self, qos_specs, volume_type):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param volume_type: The volume type id to be disassociated with
:rtype: :class:`QoSSpecs`
"""
return self._impl.qos_disassociate_type(qos_specs, volume_type)
@service.should_be_overridden
def create_snapshot(self, volume_id, force=False,
name=None, description=None, metadata=None):
"""Create one snapshot.
Returns when the snapshot is actually created and is in the "Available"
state.
:param volume_id: volume uuid for creating snapshot
:param force: If force is True, create a snapshot even if the volume is
attached to an instance. Default is False.
:param name: Name of the snapshot
:param description: Description of the snapshot
:param metadata: Metadata of the snapshot
:returns: Created snapshot object
"""
return self._impl.create_snapshot(
volume_id, force=force, name=name,
description=description, metadata=metadata)
@service.should_be_overridden
def delete_snapshot(self, snapshot):
"""Delete the given snapshot.
Returns when the snapshot is actually deleted.
:param snapshot: snapshot instance
"""
self._impl.delete_snapshot(snapshot)
@service.should_be_overridden
def create_backup(self, volume_id, container=None,
name=None, description=None,
incremental=False, force=False,
snapshot_id=None):
"""Creates a volume backup.
:param volume_id: The ID of the volume to backup.
:param container: The name of the backup service container.
:param name: The name of the backup.
:param description: The description of the backup.
:param incremental: Incremental backup.
:param force: If True, allows an in-use volume to be backed up.
:param snapshot_id: The ID of the snapshot to backup.
:returns: The created backup object.
"""
return self._impl.create_backup(volume_id, container=container,
name=name, description=description,
incremental=incremental, force=force,
snapshot_id=snapshot_id)
@service.should_be_overridden
def delete_backup(self, backup):
"""Delete a volume backup."""
self._impl.delete_backup(backup)
@service.should_be_overridden
def restore_backup(self, backup_id, volume_id=None):
"""Restore the given backup.
:param backup_id: The ID of the backup to restore.
:param volume_id: The ID of the volume to restore the backup to.
:returns: Return the restored backup.
"""
return self._impl.restore_backup(backup_id, volume_id=volume_id)
@service.should_be_overridden
def list_backups(self, detailed=True):
"""Return user volume backups list."""
return self._impl.list_backups(detailed=detailed)
@service.should_be_overridden
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
return self._impl.list_transfers(detailed=detailed,
search_opts=search_opts)
@service.should_be_overridden
def create_volume_type(self, name=None, description=None, is_public=True):
"""Creates a volume type.
:param name: Descriptive name of the volume type
:param description: Description of the volume type
:param is_public: Volume type visibility
:returns: Return the created volume type.
"""
return self._impl.create_volume_type(name=name,
description=description,
is_public=is_public)
@service.should_be_overridden
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
return self._impl.get_volume_type(volume_type)
@service.should_be_overridden
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
:returns: base on client response return True if the request
has been accepted or not
"""
return self._impl.delete_volume_type(volume_type)
@service.should_be_overridden
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
return self._impl.set_volume_type_keys(volume_type, metadata)
@service.should_be_overridden
def transfer_create(self, volume_id, name=None):
"""Creates a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer.
:returns: Return the created transfer.
"""
return self._impl.transfer_create(volume_id, name=name)
@service.should_be_overridden
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:returns: VolumeTransfer
"""
return self._impl.transfer_accept(transfer_id, auth_key=auth_key)
@service.should_be_overridden
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.create_encryption_type(volume_type, specs=specs)
@service.should_be_overridden
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.get_encryption_type(volume_type)
@service.should_be_overridden
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
return self._impl.list_encryption_type(search_opts=search_opts)
@service.should_be_overridden
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
self._impl.delete_encryption_type(volume_type)
@service.should_be_overridden
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
will be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.update_encryption_type(volume_type, specs=specs)
| {
"repo_name": "yeming233/rally",
"path": "rally/plugins/openstack/services/storage/block.py",
"copies": "1",
"size": "17489",
"license": "apache-2.0",
"hash": -1683127262000868600,
"line_mean": 38.6575963719,
"line_max": 80,
"alpha_frac": 0.6314826462,
"autogenerated": false,
"ratio": 4.2938865700957525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5425369216295752,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pecan import hooks
class UserFilterHook(hooks.PecanHook):
# we do this at the very end to ensure user-defined filters
# don't impact things like pagination and notification hooks
priority = 90
def after(self, state):
user_fields = state.request.params.getall('fields')
if not user_fields:
return
try:
data = state.response.json
except ValueError:
return
resource = state.request.context.get('resource')
collection = state.request.context.get('collection')
if collection not in data and resource not in data:
return
is_single = resource in data
key = resource if resource in data else collection
if is_single:
data[key] = self._filter_item(
state.response.json[key], user_fields)
else:
data[key] = [
self._filter_item(i, user_fields)
for i in state.response.json[key]
]
state.response.json = data
def _filter_item(self, item, fields):
return {
field: value
for field, value in item.items()
if field in fields
}
| {
"repo_name": "eayunstack/neutron",
"path": "neutron/pecan_wsgi/hooks/userfilters.py",
"copies": "5",
"size": "1791",
"license": "apache-2.0",
"hash": 904983897933084400,
"line_mean": 32.7924528302,
"line_max": 75,
"alpha_frac": 0.6320491346,
"autogenerated": false,
"ratio": 4.368292682926829,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7500341817526829,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
class CeilometerScenario(base.Scenario):
    """Base class for Ceilometer scenarios with basic atomic actions."""

    RESOURCE_NAME_PREFIX = "rally_ceilometer_"

    def _get_alarm_dict(self, **kwargs):
        """Prepare and return an alarm dict for creating an alarm.

        :param kwargs: optional parameters to create alarm
        :returns: alarm dictionary used to create an alarm
        """
        alarm_id = self._generate_random_name()
        alarm = {"alarm_id": alarm_id,
                 "name": alarm_id,
                 "description": "Test Alarm"}
        alarm.update(kwargs)
        return alarm

    @base.atomic_action_timer("ceilometer.list_alarms")
    def _list_alarms(self, alarm_id=None):
        """List alarms.

        Fetches the alarm matching alarm_id, or all alarms when
        alarm_id is None.

        :param alarm_id: specifies id of the alarm
        :returns: the matching alarm, or the list of all alarms
        """
        if alarm_id:
            return self.clients("ceilometer").alarms.get(alarm_id)
        else:
            return self.clients("ceilometer").alarms.list()

    @base.atomic_action_timer("ceilometer.create_alarm")
    def _create_alarm(self, meter_name, threshold, kwargs):
        """Create an alarm.

        :param meter_name: specifies meter name of the alarm
        :param threshold: specifies alarm threshold
        :param kwargs: contains optional features of alarm to be created
        :returns: alarm
        """
        # Work on a copy so the caller's dict is not mutated as a side
        # effect of creating the alarm.
        create_args = dict(kwargs)
        create_args.update({"meter_name": meter_name,
                            "threshold": threshold})
        alarm_dict = self._get_alarm_dict(**create_args)
        alarm = self.clients("ceilometer").alarms.create(**alarm_dict)
        return alarm

    @base.atomic_action_timer("ceilometer.delete_alarm")
    def _delete_alarm(self, alarm_id):
        """Delete an alarm.

        :param alarm_id: specifies id of the alarm
        """
        self.clients("ceilometer").alarms.delete(alarm_id)

    @base.atomic_action_timer("ceilometer.update_alarm")
    def _update_alarm(self, alarm_id, alarm_dict_delta):
        """Update an alarm.

        :param alarm_id: specifies id of the alarm
        :param alarm_dict_delta: features of alarm to be updated
        """
        self.clients("ceilometer").alarms.update(alarm_id, **alarm_dict_delta)

    @base.atomic_action_timer("ceilometer.get_alarm_history")
    def _get_alarm_history(self, alarm_id):
        """Assemble the alarm history requested.

        :param alarm_id: specifies id of the alarm
        :returns: list of alarm changes
        """
        return self.clients("ceilometer").alarms.get_history(alarm_id)

    @base.atomic_action_timer("ceilometer.get_alarm_state")
    def _get_alarm_state(self, alarm_id):
        """Get the state of the alarm.

        :param alarm_id: specifies id of the alarm
        :returns: state of the alarm
        """
        return self.clients("ceilometer").alarms.get_state(alarm_id)

    @base.atomic_action_timer("ceilometer.set_alarm_state")
    def _set_alarm_state(self, alarm, state, timeout):
        """Set the state of the alarm.

        :param alarm: alarm instance
        :param state: an alarm state to be set
        :param timeout: The number of seconds for which to attempt a
                        successful check of the alarm state.
        :returns: alarm in the set state
        """
        self.clients("ceilometer").alarms.set_state(alarm.alarm_id, state)
        # Block until the alarm actually reaches the requested state (or
        # the timeout expires).
        return bench_utils.wait_for(alarm,
                                    is_ready=bench_utils.resource_is(state),
                                    update_resource=bench_utils
                                    .get_from_manager(),
                                    timeout=timeout, check_interval=1)

    @base.atomic_action_timer("ceilometer.get_meters")
    def _list_meters(self):
        """Get list of user's meters."""
        return self.clients("ceilometer").meters.list()

    @base.atomic_action_timer("ceilometer.list_resources")
    def _list_resources(self):
        """List all resources.

        :returns: list of all resources
        """
        return self.clients("ceilometer").resources.list()

    @base.atomic_action_timer("ceilometer.list_samples")
    def _list_samples(self):
        """List all Samples.

        :returns: list of all samples
        """
        return self.clients("ceilometer").samples.list()

    @base.atomic_action_timer("ceilometer.get_resource")
    def _get_resource(self, resource_id):
        """Retrieve details about one resource.

        :param resource_id: id of the resource to fetch
        """
        return self.clients("ceilometer").resources.get(resource_id)

    @base.atomic_action_timer("ceilometer.get_stats")
    def _get_stats(self, meter_name):
        """Get stats for a specific meter.

        :param meter_name: Name of ceilometer meter
        """
        return self.clients("ceilometer").statistics.list(meter_name)

    @base.atomic_action_timer("ceilometer.create_meter")
    def _create_meter(self, **kwargs):
        """Create a new meter.

        A meter is created implicitly by posting a sample with a random
        counter name.

        :param kwargs: Contains the optional attributes for meter creation
        :returns: Newly created meter
        """
        name = self._generate_random_name()
        samples = self.clients("ceilometer").samples.create(
            counter_name=name, **kwargs)
        return samples[0]

    @base.atomic_action_timer("ceilometer.query_alarms")
    def _query_alarms(self, filter, orderby, limit):
        """Query alarms with specific parameters.

        If no input params are provided, it returns all the results
        in the database.

        :param limit: optional param for maximum number of results returned
        :param orderby: optional param for specifying ordering of results
        :param filter: optional filter query
        :returns: queried alarms
        """
        return self.clients("ceilometer").query_alarms.query(
            filter, orderby, limit)

    @base.atomic_action_timer("ceilometer.query_alarm_history")
    def _query_alarm_history(self, filter, orderby, limit):
        """Query history of an alarm.

        If no input params are provided, it returns all the results
        in the database.

        :param limit: optional param for maximum number of results returned
        :param orderby: optional param for specifying ordering of results
        :param filter: optional filter query
        :returns: alarm history
        """
        return self.clients("ceilometer").query_alarm_history.query(
            filter, orderby, limit)

    @base.atomic_action_timer("ceilometer.create_sample")
    def _create_sample(self, counter_name, counter_type, counter_unit,
                       counter_volume, resource_id=None, **kwargs):
        """Create a Sample with specified parameters.

        :param counter_name: specifies name of the counter
        :param counter_type: specifies type of the counter
        :param counter_unit: specifies unit of the counter
        :param counter_volume: specifies volume of the counter
        :param resource_id: specifies resource id for the sample created;
                            a random one is generated when omitted
        :param kwargs: contains optional parameters for creating a sample
        :returns: created sample
        """
        kwargs.update({"counter_name": counter_name,
                       "counter_type": counter_type,
                       "counter_unit": counter_unit,
                       "counter_volume": counter_volume,
                       "resource_id": resource_id if resource_id
                       else self._generate_random_name(
                           prefix="rally_ctx_resource_")})
        return self.clients("ceilometer").samples.create(**kwargs)

    @base.atomic_action_timer("ceilometer.query_samples")
    def _query_samples(self, filter, orderby, limit):
        """Query samples with specified parameters.

        If no input params are provided, it returns all the results
        in the database.

        :param limit: optional param for maximum number of results returned
        :param orderby: optional param for specifying ordering of results
        :param filter: optional filter query
        :returns: queried samples
        """
        return self.clients("ceilometer").query_samples.query(
            filter, orderby, limit)
| {
"repo_name": "vponomaryov/rally",
"path": "rally/plugins/openstack/scenarios/ceilometer/utils.py",
"copies": "1",
"size": "9012",
"license": "apache-2.0",
"hash": -4475614461734561300,
"line_mean": 37.8448275862,
"line_max": 78,
"alpha_frac": 0.6303817133,
"autogenerated": false,
"ratio": 4.307839388145315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5438221101445315,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark.scenarios.ec2 import utils
from rally.benchmark import types
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
LOG = logging.getLogger(__name__)
class EC2Servers(utils.EC2Scenario):
    """Benchmark scenarios for servers using EC2."""

    @types.set(image=types.EC2ImageResourceType,
               flavor=types.EC2FlavorResourceType)
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.EC2)
    @validation.required_openstack(users=True)
    @base.scenario(context={"cleanup": ["ec2"]})
    def boot_server(self, image, flavor, **kwargs):
        """Boot a single server through the EC2 API.

        Cleanup of the booted instance is assumed to happen elsewhere.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param kwargs: optional additional arguments for server creation
        """
        self._boot_server(image, flavor, **kwargs)
| {
"repo_name": "varunarya10/rally",
"path": "rally/benchmark/scenarios/ec2/servers.py",
"copies": "2",
"size": "1615",
"license": "apache-2.0",
"hash": 7095562656100654000,
"line_mean": 35.7045454545,
"line_max": 75,
"alpha_frac": 0.7294117647,
"autogenerated": false,
"ratio": 4.098984771573604,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 44
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class NeutronQuotas(object):
    """Management of Neutron quotas."""

    # Every quota resource accepts an integer >= -1 (-1 means unlimited),
    # so the per-resource schemas are generated from one template.
    QUOTAS_SCHEMA = {
        "type": "object",
        "additionalProperties": False,
        "properties": {resource: {"type": "integer", "minimum": -1}
                       for resource in ("network", "subnet", "port",
                                        "router", "floatingip",
                                        "security_group",
                                        "security_group_rule")}
    }

    def __init__(self, clients):
        self.clients = clients

    def update(self, tenant_id, **kwargs):
        """Set the given quota values for a tenant."""
        self.clients.neutron().update_quota(tenant_id, body={"quota": kwargs})

    def delete(self, tenant_id):
        """Reset quotas to defaults and tag database objects as deleted."""
        self.clients.neutron().delete_quota(tenant_id)
| {
"repo_name": "vponomaryov/rally",
"path": "rally/plugins/openstack/context/quotas/neutron_quotas.py",
"copies": "3",
"size": "1992",
"license": "apache-2.0",
"hash": 5448351872551479000,
"line_mean": 28.2941176471,
"line_max": 78,
"alpha_frac": 0.5135542169,
"autogenerated": false,
"ratio": 4.416851441241685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.ec2 import utils
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
class EC2Servers(utils.EC2Scenario):
    """Benchmark scenarios for servers using EC2."""

    @validation.required_services(consts.Service.EC2)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["ec2"]})
    def list_servers(self):
        """List all servers.

        A simple exercise of the EC2 list API: enumerate every server
        visible to the user.
        """
        self._list_servers()

    @types.set(image=types.EC2ImageResourceType,
               flavor=types.EC2FlavorResourceType)
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.EC2)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["ec2"]})
    def boot_server(self, image, flavor, **kwargs):
        """Boot a server through the EC2 API.

        Cleanup of the booted instance is assumed to happen elsewhere.

        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param kwargs: optional additional arguments for server creation
        """
        self._boot_servers(image, flavor, **kwargs)
| {
"repo_name": "amit0701/rally",
"path": "rally/plugins/openstack/scenarios/ec2/servers.py",
"copies": "1",
"size": "1966",
"license": "apache-2.0",
"hash": -4851321669202765000,
"line_mean": 34.7454545455,
"line_max": 75,
"alpha_frac": 0.7136317396,
"autogenerated": false,
"ratio": 4.130252100840337,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally import exceptions
from rally.plugins.openstack import service
from rally.plugins.openstack.services.identity import identity
from rally.plugins.openstack.services.identity import keystone_common
from rally.task import atomic
LOG = logging.getLogger(__name__)
@service.service("keystone", service_type="identity", version="3")
class KeystoneV3Service(service.Service, keystone_common.KeystoneMixin):
    """Atomic-action wrappers around the Keystone v3 client API."""

    def _get_domain_id(self, domain_name_or_id):
        """Resolve a domain name or ID to a domain ID.

        :param domain_name_or_id: domain name or ID to resolve
        :returns: the domain ID
        :raises GetResourceNotFound: when no domain matches by ID or name
        """
        from keystoneclient import exceptions as kc_exceptions

        try:
            # First try to find domain by ID
            return self._clients.keystone("3").domains.get(
                domain_name_or_id).id
        except kc_exceptions.NotFound:
            # Domain not found by ID, try to find it by name
            domains = self._clients.keystone("3").domains.list(
                name=domain_name_or_id)
            if domains:
                return domains[0].id
            # Domain not found by name
            raise exceptions.GetResourceNotFound(
                resource="KeystoneDomain(%s)" % domain_name_or_id)

    @atomic.action_timer("keystone_v3.create_project")
    def create_project(self, project_name=None, domain_name="Default"):
        """Create a project in the given domain.

        :param project_name: name of the project (random when omitted)
        :param domain_name: name or ID of the domain to create it in
        :returns: the created project
        """
        project_name = project_name or self.generate_random_name()
        domain_id = self._get_domain_id(domain_name)
        return self._clients.keystone("3").projects.create(name=project_name,
                                                           domain=domain_id)

    @atomic.action_timer("keystone_v3.update_project")
    def update_project(self, project_id, name=None, enabled=None,
                       description=None):
        """Update tenant name and description.

        :param project_id: Id of project to update
        :param name: project name to be set (if boolean True, random name will
            be set)
        :param enabled: enabled status of project
        :param description: project description to be set (if boolean True,
            random description will be set)
        """
        if name is True:
            name = self.generate_random_name()
        if description is True:
            description = self.generate_random_name()
        self._clients.keystone("3").projects.update(
            project_id, name=name, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.delete_project")
    def delete_project(self, project_id):
        """Delete the project with the given ID."""
        self._clients.keystone("3").projects.delete(project_id)

    @atomic.action_timer("keystone_v3.list_projects")
    def list_projects(self):
        """List all projects."""
        return self._clients.keystone("3").projects.list()

    @atomic.action_timer("keystone_v3.get_project")
    def get_project(self, project_id):
        """Get project."""
        return self._clients.keystone("3").projects.get(project_id)

    @atomic.action_timer("keystone_v3.create_user")
    def create_user(self, username=None, password=None, project_id=None,
                    domain_name="Default", enabled=True,
                    default_role="member"):
        """Create user.

        :param username: name of user
        :param password: user password
        :param project_id: user's default project
        :param domain_name: Name or id of domain where to create project.
        :param enabled: whether the user is enabled.
        :param default_role: user's default role
        """
        domain_id = self._get_domain_id(domain_name)
        username = username or self.generate_random_name()
        user = self._clients.keystone("3").users.create(
            name=username, password=password, default_project=project_id,
            domain=domain_id, enabled=enabled)
        if project_id:
            # we can't setup role without project_id
            roles = self.list_roles()
            # First pass: exact (case-insensitive) role-name match.
            for role in roles:
                if default_role == role.name.lower():
                    self.add_role(role_id=role.id,
                                  user_id=user.id,
                                  project_id=project_id)
                    return user
            # Second pass: tolerate underscore-wrapped role names
            # (presumably e.g. "_member_" — verify against deployment).
            for role in roles:
                if default_role == role.name.lower().strip("_"):
                    self.add_role(role_id=role.id,
                                  user_id=user.id,
                                  project_id=project_id)
                    return user
            # Lazy %-args let logging skip formatting when the warning
            # level is disabled.
            LOG.warning("Unable to set %s role to created user.",
                        default_role)
        return user

    @atomic.action_timer("keystone_v3.create_users")
    def create_users(self, project_id, number_of_users, user_create_args=None):
        """Create specified amount of users.

        :param project_id: Id of project
        :param number_of_users: number of users to create
        :param user_create_args: additional user creation arguments
        """
        users = []
        for _i in range(number_of_users):
            users.append(self.create_user(project_id=project_id,
                                          **(user_create_args or {})))
        return users

    @atomic.action_timer("keystone_v3.update_user")
    def update_user(self, user_id, name=None, domain_name=None,
                    project_id=None, password=None, email=None,
                    description=None, enabled=None, default_project=None):
        """Update the given user's attributes."""
        domain = None
        if domain_name:
            domain = self._get_domain_id(domain_name)

        self._clients.keystone("3").users.update(
            user_id, name=name, domain=domain, project=project_id,
            password=password, email=email, description=description,
            enabled=enabled, default_project=default_project)

    @atomic.action_timer("keystone_v3.create_service")
    def create_service(self, name=None, service_type=None, description=None,
                       enabled=True):
        """Creates keystone service.

        :param name: name of service to create
        :param service_type: type of the service
        :param description: description of the service
        :param enabled: whether the service appears in the catalog
        :returns: keystone service instance
        """
        name = name or self.generate_random_name()
        service_type = service_type or "rally_test_type"
        description = description or self.generate_random_name()
        return self._clients.keystone("3").services.create(
            name, type=service_type, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.create_role")
    def create_role(self, name=None, domain_name=None):
        """Create a role, optionally scoped to a domain.

        :param name: role name (random when omitted)
        :param domain_name: name or ID of the domain to scope the role to
        :returns: the created role
        """
        domain_id = None
        if domain_name:
            domain_id = self._get_domain_id(domain_name)
        name = name or self.generate_random_name()
        return self._clients.keystone("3").roles.create(name, domain=domain_id)

    @atomic.action_timer("keystone_v3.add_role")
    def add_role(self, role_id, user_id, project_id):
        """Grant a role to a user on a project."""
        self._clients.keystone("3").roles.grant(role=role_id,
                                                user=user_id,
                                                project=project_id)

    @atomic.action_timer("keystone_v3.list_roles")
    def list_roles(self, user_id=None, project_id=None, domain_name=None):
        """List all roles."""
        domain_id = None
        if domain_name:
            domain_id = self._get_domain_id(domain_name)
        return self._clients.keystone("3").roles.list(user=user_id,
                                                      project=project_id,
                                                      domain=domain_id)

    @atomic.action_timer("keystone_v3.revoke_role")
    def revoke_role(self, role_id, user_id, project_id):
        """Revoke a role from a user on a project."""
        self._clients.keystone("3").roles.revoke(role=role_id,
                                                 user=user_id,
                                                 project=project_id)

    @atomic.action_timer("keystone_v3.create_domain")
    def create_domain(self, name, description=None, enabled=True):
        """Create a domain with the given name."""
        return self._clients.keystone("3").domains.create(
            name, description=description, enabled=enabled)

    @atomic.action_timer("keystone_v3.create_ec2creds")
    def create_ec2credentials(self, user_id, project_id):
        """Create ec2credentials.

        :param user_id: User ID for which to create credentials
        :param project_id: Tenant ID for which to create credentials
        :returns: Created ec2-credentials object
        """
        return self._clients.keystone("3").ec2.create(user_id,
                                                      project_id=project_id)
@service.compat_layer(KeystoneV3Service)
class UnifiedKeystoneV3Service(keystone_common.UnifiedKeystoneMixin,
                               identity.Identity):
    """Compatibility layer exposing Keystone v3 via the unified Identity API."""
    @staticmethod
    def _unify_project(project):
        """Convert a keystone v3 project into the unified Project type."""
        return identity.Project(id=project.id, name=project.name,
                                domain_id=project.domain_id)
    @staticmethod
    def _unify_user(user):
        """Convert a keystone v3 user into the unified User type."""
        # When user has default_project_id that is None user.default_project_id
        # will raise AttributeError
        project_id = getattr(user, "project_id",
                             getattr(user, "default_project_id", None))
        return identity.User(id=user.id, name=user.name, project_id=project_id,
                             domain_id=user.domain_id)
    def create_project(self, project_name=None, domain_name="Default"):
        """Creates new project/tenant and return project object.

        :param project_name: Name of project to be created.
        :param domain_name: Name or id of domain where to create project,
        """
        project = self._impl.create_project(project_name,
                                            domain_name=domain_name)
        return self._unify_project(project)
    def update_project(self, project_id, name=None, enabled=None,
                       description=None):
        """Update project name, enabled and description

        :param project_id: Id of project to update
        :param name: project name to be set
        :param enabled: enabled status of project
        :param description: project description to be set
        """
        self._impl.update_project(project_id=project_id, name=name,
                                  enabled=enabled, description=description)
    def delete_project(self, project_id):
        """Deletes project."""
        return self._impl.delete_project(project_id)
    def list_projects(self):
        """List all projects."""
        return [self._unify_project(p) for p in self._impl.list_projects()]
    def get_project(self, project_id):
        """Get project."""
        return self._unify_project(self._impl.get_project(project_id))
    def create_user(self, username=None, password=None, project_id=None,
                    domain_name="Default", enabled=True,
                    default_role="member"):
        """Create user.

        :param username: name of user
        :param password: user password
        :param project_id: user's default project
        :param domain_name: Name or id of domain where to create project,
        :param enabled: whether the user is enabled.
        :param default_role: Name of default user's role
        """
        return self._unify_user(self._impl.create_user(
            username=username, password=password, project_id=project_id,
            domain_name=domain_name, default_role=default_role,
            enabled=enabled))
    def create_users(self, project_id, number_of_users, user_create_args=None):
        """Create specified amount of users.

        :param project_id: Id of project
        :param number_of_users: number of users to create
        :param user_create_args: additional user creation arguments
        """
        return [self._unify_user(u)
                for u in self._impl.create_users(
                    project_id=project_id, number_of_users=number_of_users,
                    user_create_args=user_create_args)]
    def list_users(self):
        """List all users."""
        return [self._unify_user(u) for u in self._impl.list_users()]
    def update_user(self, user_id, enabled=None, name=None, email=None,
                    password=None):
        """Update the given user's attributes."""
        return self._impl.update_user(user_id, enabled=enabled, name=name,
                                      email=email, password=password)
    def list_services(self):
        """List all services."""
        return [self._unify_service(s) for s in self._impl.list_services()]
    def create_role(self, name=None, domain_name=None):
        """Create a role, optionally scoped to a domain."""
        return self._unify_role(self._impl.create_role(
            name, domain_name=domain_name))
    def add_role(self, role_id, user_id, project_id):
        """Add role to user."""
        self._impl.add_role(role_id=role_id, user_id=user_id,
                            project_id=project_id)
    def revoke_role(self, role_id, user_id, project_id):
        """Revokes a role from a user."""
        return self._impl.revoke_role(role_id=role_id, user_id=user_id,
                                      project_id=project_id)
    def list_roles(self, user_id=None, project_id=None, domain_name=None):
        """List all roles."""
        return [self._unify_role(role) for role in self._impl.list_roles(
            user_id=user_id, project_id=project_id, domain_name=domain_name)]
    def create_ec2credentials(self, user_id, project_id):
        """Create ec2credentials.

        :param user_id: User ID for which to create credentials
        :param project_id: Project ID for which to create credentials
        :returns: Created ec2-credentials object
        """
        return self._impl.create_ec2credentials(user_id=user_id,
                                                project_id=project_id)
| {
"repo_name": "yeming233/rally",
"path": "rally/plugins/openstack/services/identity/keystone_v3.py",
"copies": "1",
"size": "14485",
"license": "apache-2.0",
"hash": 1784757120397807000,
"line_mean": 41.6029411765,
"line_max": 79,
"alpha_frac": 0.6,
"autogenerated": false,
"ratio": 4.170745752951339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 340
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.ec2 import utils
from rally.task import types
from rally.task import validation
class EC2Servers(utils.EC2Scenario):
    """Benchmark scenarios for servers using EC2."""
    @validation.required_services(consts.Service.EC2)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["ec2"]})
    def list_servers(self):
        """List all servers.
        This simple scenario tests the EC2 API list function by listing
        all the servers.
        """
        # Delegates to the EC2Scenario helper; measurement/reporting is
        # presumably handled by the base class -- TODO confirm.
        self._list_servers()
    @types.convert(image={"type": "ec2_image"},
                   flavor={"type": "ec2_flavor"})
    @validation.image_valid_on_flavor("flavor", "image")
    @validation.required_services(consts.Service.EC2)
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["ec2"]})
    def boot_server(self, image, flavor, **kwargs):
        """Boot a server.
        Assumes that cleanup is done elsewhere.
        :param image: image to be used to boot an instance
        :param flavor: flavor to be used to boot an instance
        :param kwargs: optional additional arguments for server creation
        """
        # NOTE(review): helper name is plural (_boot_servers) although a
        # single server spec is passed -- verify helper semantics.
        self._boot_servers(image, flavor, **kwargs)
| {
"repo_name": "vganapath/rally",
"path": "rally/plugins/openstack/scenarios/ec2/servers.py",
"copies": "3",
"size": "1895",
"license": "apache-2.0",
"hash": -4357293118544458000,
"line_mean": 36.1568627451,
"line_max": 75,
"alpha_frac": 0.7018469657,
"autogenerated": false,
"ratio": 4.137554585152839,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6339401550852839,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.monasca import utils as monascautils
from rally.task import validation
"""Scenarios for monasca Metrics API."""
@validation.add("required_services",
                services=[consts.Service.MONASCA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="MonascaMetrics.list_metrics", platform="openstack")
class ListMetrics(monascautils.MonascaScenario):
    """Scenario that lists the Monasca metrics visible to the user."""
    def run(self, **kwargs):
        """Fetch user's metrics.
        :param kwargs: optional arguments for list query:
                       name, dimensions, start_time, etc
        """
        # Pass-through to the Monasca scenario helper.
        self._list_metrics(**kwargs)
| {
"repo_name": "yeming233/rally",
"path": "rally/plugins/openstack/scenarios/monasca/metrics.py",
"copies": "1",
"size": "1329",
"license": "apache-2.0",
"hash": -7596638258384724000,
"line_mean": 35.9166666667,
"line_max": 78,
"alpha_frac": 0.7185854026,
"autogenerated": false,
"ratio": 4.027272727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245858129872727,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker import context
from tacker.nfvo.drivers.workflow import workflow_generator
from tacker.tests.unit import base
def get_dummy_ns():
    """Return a minimal fake NS record plus VNFD details for the tests."""
    ns = {
        'description': '',
        'tenant_id': 'a81900a92bda40588c52699e1873a92f',
        'vim_id': '96025dd5-ca16-49f3-9823-958eb04260c4',
        'vnf_ids': '',
        'attributes': {},
        'nsd_id': 'b8587afb-6099-4f56-abce-572c62e3d61d',
        'name': 'test_create_ns',
    }
    vnfd_details = {
        'vnf1': {'instances': ['VNF1'],
                 'id': 'dec09ed4-f355-4ec8-a00b-8548f6575a80'},
        'vnf2': {'instances': ['VNF2'],
                 'id': '9f8f2af7-6407-4f79-a6fe-302c56172231'},
    }
    return {'ns': ns,
            'vnfd_details': vnfd_details,
            'placement_attr': {}}
def get_dummy_vnffg_ns():
    """Return a fake NS record that also carries a VNFFG descriptor.

    Same shape as get_dummy_ns() (note: no 'tenant_id' key here) plus a
    'vnffgd_templates' entry with one forwarding path and its group.
    """
    return {
        'ns': {
            'description': '',
            'vim_id': '96025dd5-ca16-49f3-9823-958eb04260c4',
            'vnf_ids': '', 'attributes': {},
            'nsd_id': 'b8587afb-6099-4f56-abce-572c62e3d61d',
            'name': 'test_create_ns'},
        'vnfd_details': {
            'vnf1': {'instances': ['VNF1'],
                     'id': 'dec09ed4-f355-4ec8-a00b-8548f6575a80'},
            'vnf2': {'instances': ['VNF2'],
                     'id': '9f8f2af7-6407-4f79-a6fe-302c56172231'}},
        'placement_attr': {},
        'vnffgd_templates': {
            'VNFFG1': {
                'tosca_definitions_version':
                'tosca_simple_profile_for_nfv_1_0_0',
                'description': 'VNFFG1 descriptor',
                'topology_template': {
                    'node_templates': {
                        'Forwarding_path1': {
                            'type': 'tosca.nodes.nfv.FP.TackerV2',
                            'description': 'creates path inside ns - test',
                            'properties': {
                                'policy': {
                                    'type': 'ACL',
                                    'criteria': [{
                                        'classifier': {
                                            'ip_proto': 6,
                                            'network_src_port_id': {
                                                'get_input': 'net_src_port_id'
                                            },
                                            'ip_dst_prefix': {
                                                'get_input': 'ip_dest_prefix'
                                            },
                                            'destination_port_range': '80-1024'
                                        },
                                        'name': 'block_tcp'}]},
                                'path': [
                                    {'capability': 'CP12',
                                     'forwarder': 'vnf1'},
                                    {'capability': 'CP22',
                                     'forwarder': 'vnf2'}],
                                'id': 51}}},
                'groups': {
                    'VNFFG1': {
                        'type': 'tosca.groups.nfv.VNFFG',
                        'description': 'HTTP to Corporate Net',
                        'members': ['Forwarding_path1'],
                        'properties': {
                            'version': 1.0,
                            'vendor': 'tacker',
                            'constituent_vnfs': ['vnf1', 'vnf2'],
                            'connection_point': ['CP12', 'CP22'],
                            'number_of_endpoints': 2,
                            'dependent_virtual_link': ['VL1', 'VL2']}
                    }
                }
            }
        }
    }
def get_dummy_param():
    """Return fake substitution-mapping parameters for NS creation."""
    def _vl_node(network_name):
        # A TOSCA virtual-link node bound to the given network.
        return {'type': 'tosca.nodes.nfv.VL',
                'properties': {'network_name': network_name,
                               'vendor': 'tacker'}}
    substitution_mappings = {
        'VL1b8587afb-60': _vl_node('net_mgmt'),
        'VL2b8587afb-60': _vl_node('net0'),
        'requirements': {'virtualLink1': 'VL1b8587afb-60',
                         'virtualLink2': 'VL2b8587afb-60'},
    }
    return {'vnf1': {'substitution_mappings': substitution_mappings},
            'nsd': {'vl1_name': 'net_mgmt', 'vl2_name': 'net0'}}
def get_dummy_create_workflow():
    """Expected Mistral 'create NS' workflow for two plain VNFs.

    This is the literal definition the tests expect WorkflowGenerator to
    emit for get_dummy_ns(): a create task and a wait-active task per VNF,
    plus per-VNF delete tasks that run when the wait publishes ERROR.
    """
    # NOTE(review): the leading space in the ' <% task(' values of the
    # wait tasks' publish maps looks accidental but is part of the
    # expected output -- confirm against WorkflowGenerator.
    return {'std.create_ns_dummy': {'input': ['ns'],
        'tasks': {
            'wait_vnf_active_VNF2': {
                'action': 'tacker.show_vnf vnf=<% $.vnf_id_VNF2 %>',
                'retry': {'count': 10, 'delay': 10,
                          'continue-on': '<% $.status_VNF2 = '
                                         '"PENDING_CREATE" %>',
                          'break-on': '<% $.status_VNF2 = "ERROR" %>'},
                'publish': {
                    'status_VNF2': '<% task(wait_vnf_active_VNF2).'
                                   'result.vnf.status %>',
                    'mgmt_ip_address_VNF2': ' <% task('
                                            'wait_vnf_active_VNF2).'
                                            'result.vnf.'
                                            'mgmt_ip_address %>'},
                'on-success': [{
                    'delete_vnf_VNF2': '<% $.status_VNF2='
                                       '"ERROR" %>'}]},
            'create_ns_VNF2': {
                'action': 'tacker.create_vnf body=<% $.ns.VNF2 %>',
                'input': {'body': '<% $.ns.VNF2 %>'},
                'publish': {
                    'status_VNF2': '<% task(create_ns_VNF2).'
                                   'result.vnf.status %>',
                    'vim_id_VNF2': '<% task(create_ns_VNF2).'
                                   'result.vnf.vim_id %>',
                    'mgmt_ip_address_VNF2': '<% task('
                                            'create_ns_VNF2).'
                                            'result.vnf.'
                                            'mgmt_ip_address %>',
                    'vnf_id_VNF2': '<% task(create_ns_VNF2)'
                                   '.result.vnf.id %>'},
                'on-success': ['wait_vnf_active_VNF2']},
            'create_ns_VNF1': {
                'action': 'tacker.create_vnf body=<% $.ns.VNF1 %>',
                'input': {'body': '<% $.ns.VNF1 %>'},
                'publish': {
                    'status_VNF1': '<% task(create_ns_VNF1).'
                                   'result.vnf.status %>',
                    'vnf_id_VNF1': '<% task(create_ns_VNF1).'
                                   'result.vnf.id %>',
                    'mgmt_ip_address_VNF1': '<% task('
                                            'create_ns_VNF1).'
                                            'result.vnf.'
                                            'mgmt_ip_address %>',
                    'vim_id_VNF1': '<% task(create_ns_VNF1).'
                                   'result.vnf.vim_id %>'},
                'on-success': ['wait_vnf_active_VNF1']},
            'wait_vnf_active_VNF1': {
                'action': 'tacker.show_vnf vnf=<% $.vnf_id_VNF1 %>',
                'retry': {'count': 10, 'delay': 10,
                          'continue-on': '<% $.status_VNF1 = "PENDING_'
                                         'CREATE" %>',
                          'break-on': '<% $.status_VNF1 = "ERROR" %>'},
                'publish': {
                    'status_VNF1': '<% task(wait_vnf_active_VNF1).'
                                   'result.vnf.status %>',
                    'mgmt_ip_address_VNF1': ' <% task('
                                            'wait_vnf_active_VNF1).'
                                            'result.vnf.'
                                            'mgmt_ip_address %>'},
                'on-success': [{'delete_vnf_VNF1': '<% $.status_VNF1='
                                                   '"ERROR" %>'}]},
            'delete_vnf_VNF1': {
                'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF1%>',
                'input': {'body': {'vnf': {'attributes': {
                    'force': False}}}}},
            'delete_vnf_VNF2': {
                'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF2%>',
                'input': {'body': {'vnf': {'attributes': {
                    'force': False}}}}}},
        'type': 'direct', 'output': {
            'status_VNF1': '<% $.status_VNF1 %>',
            'status_VNF2': '<% $.status_VNF2 %>',
            'mgmt_ip_address_VNF2': '<% $.mgmt_ip_address_VNF2 %>',
            'mgmt_ip_address_VNF1': '<% $.mgmt_ip_address_VNF1 %>',
            'vim_id_VNF2': '<% $.vim_id_VNF2 %>',
            'vnf_id_VNF1': '<% $.vnf_id_VNF1 %>',
            'vnf_id_VNF2': '<% $.vnf_id_VNF2 %>',
            'vim_id_VNF1': '<% $.vim_id_VNF1 %>'}},
        'version': '2.0'}
def get_dummy_create_vnffg_ns_workflow():
    """Expected Mistral 'create NS' workflow for two VNFs plus a VNFFG.

    Like get_dummy_create_workflow() but both wait tasks additionally
    chain into create_vnffg_VNFFG1, which uses 'join: all' so the VNFFG
    is created only after every VNF is active.
    """
    return {
        'std.create_ns_dummy': {
            'input': ['ns'],
            'tasks': {
                'wait_vnf_active_VNF2': {
                    'action': 'tacker.show_vnf vnf=<% $.vnf_id_VNF2 %>',
                    'retry': {
                        'count': 10,
                        'delay': 10,
                        'continue-on':
                            '<% $.status_VNF2 = "PENDING_CREATE" %>',
                        'break-on':
                            '<% $.status_VNF2 = "ERROR" %>'},
                    'publish': {
                        'status_VNF2':
                            '<% task(wait_vnf_active_VNF2).result.'
                            'vnf.status %>',
                        'mgmt_ip_address_VNF2':
                            ' <% task(wait_vnf_active_VNF2).result.'
                            'vnf.mgmt_ip_address %>'},
                    'on-success': [
                        {'delete_vnf_VNF2': '<% $.status_VNF2="ERROR" %>'},
                        'create_vnffg_VNFFG1']},
                'create_vnffg_VNFFG1': {
                    'action': 'tacker.create_vnffg body=<% $.ns.VNFFG1 %>',
                    'input': {'body': '<% $.ns.VNFFG1 %>'},
                    'join': 'all',
                    'publish': {
                        'vnffg_id_VNFFG1': '<% task(create_vnffg_VNFFG1).'
                                           'result.vnffg.id %>'}},
                'wait_vnf_active_VNF1': {
                    'action': 'tacker.show_vnf vnf=<% $.vnf_id_VNF1 %>',
                    'retry': {
                        'count': 10,
                        'delay': 10,
                        'continue-on':
                            '<% $.status_VNF1 = "PENDING_CREATE" %>',
                        'break-on':
                            '<% $.status_VNF1 = "ERROR" %>'},
                    'publish': {
                        'status_VNF1':
                            '<% task(wait_vnf_active_VNF1).result.'
                            'vnf.status %>',
                        'mgmt_ip_address_VNF1':
                            ' <% task(wait_vnf_active_VNF1).result.'
                            'vnf.mgmt_ip_address %>'},
                    'on-success': [
                        {'delete_vnf_VNF1': '<% $.status_VNF1="ERROR" %>'},
                        'create_vnffg_VNFFG1']},
                'create_ns_VNF1': {
                    'action': 'tacker.create_vnf body=<% $.ns.VNF1 %>',
                    'input': {'body': '<% $.ns.VNF1 %>'},
                    'publish': {
                        'status_VNF1':
                            '<% task(create_ns_VNF1).result.vnf.status %>',
                        'vnf_id_VNF1':
                            '<% task(create_ns_VNF1).result.vnf.id %>',
                        'mgmt_ip_address_VNF1':
                            '<% task(create_ns_VNF1).result.'
                            'vnf.mgmt_ip_address %>',
                        'vim_id_VNF1':
                            '<% task(create_ns_VNF1).result.vnf.vim_id %>'},
                    'on-success': ['wait_vnf_active_VNF1']},
                'create_ns_VNF2': {
                    'action': 'tacker.create_vnf body=<% $.ns.VNF2 %>',
                    'input': {'body': '<% $.ns.VNF2 %>'},
                    'publish': {
                        'status_VNF2':
                            '<% task(create_ns_VNF2).result.vnf.status %>',
                        'vim_id_VNF2':
                            '<% task(create_ns_VNF2).result.vnf.vim_id %>',
                        'mgmt_ip_address_VNF2':
                            '<% task(create_ns_VNF2).result.'
                            'vnf.mgmt_ip_address %>',
                        'vnf_id_VNF2':
                            '<% task(create_ns_VNF2).result.vnf.id %>'},
                    'on-success': ['wait_vnf_active_VNF2']},
                'delete_vnf_VNF1': {
                    'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF1%>',
                    'input': {'body': {'vnf': {'attributes': {
                        'force': False}}}}},
                'delete_vnf_VNF2': {
                    'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF2%>',
                    'input': {'body': {'vnf': {'attributes': {
                        'force': False}}}}}},
            'type': 'direct',
            'output': {
                'status_VNF1': '<% $.status_VNF1 %>',
                'status_VNF2': '<% $.status_VNF2 %>',
                'mgmt_ip_address_VNF2': '<% $.mgmt_ip_address_VNF2 %>',
                'mgmt_ip_address_VNF1': '<% $.mgmt_ip_address_VNF1 %>',
                'vnffg_id_VNFFG1': '<% $.vnffg_id_VNFFG1 %>',
                'vim_id_VNF2': '<% $.vim_id_VNF2 %>',
                'vnf_id_VNF1': '<% $.vnf_id_VNF1 %>',
                'vnf_id_VNF2': '<% $.vnf_id_VNF2 %>',
                'vim_id_VNF1': '<% $.vim_id_VNF1 %>'}},
        'version': '2.0'}
def dummy_delete_ns_obj():
    """Fake NS-delete payload: one VNF id string, no VNFFGs."""
    # Values are string reprs of dicts, exactly as stored on the NS record.
    vnf_ids = "{'VNF1': '5de5eca6-3e21-4bbd-a9d7-86458de75f0c'}"
    return {'vnf_ids': vnf_ids, 'vnffg_ids': "{}"}
def dummy_delete_vnffg_ns_obj():
    """Fake NS-delete payload carrying one VNF id and one VNFFG id."""
    # Values are string reprs of dicts, exactly as stored on the NS record.
    vnf_ids = "{'VNF1': '5de5eca6-3e21-4bbd-a9d7-86458de75f0c'}"
    vnffg_ids = "{'VNFFG1': '99066f25-3124-44f1-bc5d-bc0bf236b012'}"
    return {'vnf_ids': vnf_ids, 'vnffg_ids': vnffg_ids}
def get_dummy_delete_workflow():
    """Expected Mistral workflow for deleting an NS with a single VNF."""
    delete_task = {
        'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF1%>',
        'input': {'body': {'vnf': {'attributes': {'force': False}}}},
    }
    return {'version': '2.0',
            'std.delete_ns_dummy': {
                'input': ['vnf_id_VNF1'],
                'tasks': {'delete_vnf_VNF1': delete_task},
                'type': 'direct'}}
def get_dummy_delete_vnffg_ns_workflow():
    """Expected Mistral workflow for deleting an NS plus its VNFFG."""
    # The VNFFG is deleted first; the VNF delete is chained via on-success
    # and additionally joins on all predecessors.
    vnf_task = {
        'join': 'all',
        'action': 'tacker.delete_vnf vnf=<% $.vnf_id_VNF1%>',
        'input': {'body': {'vnf': {'attributes': {'force': False}}}},
    }
    vnffg_task = {
        'action': 'tacker.delete_vnffg vnffg=<% $.VNFFG1 %>',
        'on-success': ['delete_vnf_VNF1'],
    }
    return {'version': '2.0',
            'std.delete_ns_dummy': {
                'input': ['vnf_id_VNF1', 'VNFFG1'],
                'tasks': {'delete_vnf_VNF1': vnf_task,
                          'delete_vnffg_VNFFG1': vnffg_task},
                'type': 'direct'}}
class FakeMistral(object):
    """Stand-in for a Mistral client; the tests only need it to exist."""
    def __init__(self):
        # No behaviour is required from the fake.
        pass
class FakeNFVOPlugin(object):
    """Minimal NFVO-plugin double that wires up a real WorkflowGenerator."""
    def __init__(self, context, client, resource, action):
        self.context = context
        self.client = client
        # The generator under test is real; only its surroundings are faked.
        self.wg = workflow_generator.WorkflowGenerator(resource, action)
    def prepare_workflow(self, **kwargs):
        """Feed the given kwargs into the generator's task() builder."""
        self.wg.task(**kwargs)
class TestWorkflowGenerator(base.TestCase):
    """Verify WorkflowGenerator renders the expected Mistral definitions."""
    def setUp(self):
        super(TestWorkflowGenerator, self).setUp()
        self.mistral_client = FakeMistral()
    def test_prepare_workflow_create(self):
        """Create workflow for a plain NS matches the expected literal."""
        # NOTE(review): the `context` *module* (not a context instance) is
        # passed to the fake plugin; it is stored but never dereferenced
        # in these tests -- confirm intent.
        fPlugin = FakeNFVOPlugin(context, self.mistral_client,
                                 resource='ns', action='create')
        fPlugin.prepare_workflow(ns=get_dummy_ns(), params=get_dummy_param())
        # definition maps workflow names (and 'version') to their specs.
        wf_def_values = [fPlugin.wg.definition[k] for
                         k in fPlugin.wg.definition]
        self.assertIn(get_dummy_create_workflow()['std.create_ns_dummy'],
                      wf_def_values)
        self.assertEqual(get_dummy_create_workflow()['version'],
                         fPlugin.wg.definition['version'])
    def test_prepare_vnffg_ns_workflow_create(self):
        """Create workflow for an NS with a VNFFG matches the expected."""
        fPlugin = FakeNFVOPlugin(context, self.mistral_client,
                                 resource='ns', action='create')
        fPlugin.prepare_workflow(ns=get_dummy_vnffg_ns(),
                                 params=get_dummy_param())
        wf_def_values = [fPlugin.wg.definition[k] for
                         k in fPlugin.wg.definition]
        self.assertIn(
            get_dummy_create_vnffg_ns_workflow()['std.create_ns_dummy'],
            wf_def_values)
        self.assertEqual(
            get_dummy_create_vnffg_ns_workflow()['version'],
            fPlugin.wg.definition['version'])
    def test_prepare_workflow_delete(self):
        """Delete workflow for a plain NS matches the expected literal."""
        fPlugin = FakeNFVOPlugin(context, self.mistral_client,
                                 resource='ns', action='delete')
        fPlugin.prepare_workflow(ns=dummy_delete_ns_obj())
        wf_def_values = [fPlugin.wg.definition[k] for
                         k in fPlugin.wg.definition]
        self.assertIn(get_dummy_delete_workflow()['std.delete_ns_dummy'],
                      wf_def_values)
        self.assertEqual(get_dummy_delete_workflow()['version'],
                         fPlugin.wg.definition['version'])
    def test_prepare_vnffg_ns_workflow_delete(self):
        """Delete workflow for an NS with a VNFFG matches the expected."""
        fPlugin = FakeNFVOPlugin(context, self.mistral_client,
                                 resource='ns', action='delete')
        fPlugin.prepare_workflow(ns=dummy_delete_vnffg_ns_obj())
        wf_def_values = [fPlugin.wg.definition[k] for
                         k in fPlugin.wg.definition]
        self.assertIn(
            get_dummy_delete_vnffg_ns_workflow()['std.delete_ns_dummy'],
            wf_def_values)
        self.assertEqual(
            get_dummy_delete_vnffg_ns_workflow()['version'],
            fPlugin.wg.definition['version'])
| {
"repo_name": "openstack/tacker",
"path": "tacker/tests/unit/nfvo/drivers/workflow/test_workflow_generator.py",
"copies": "1",
"size": "19658",
"license": "apache-2.0",
"hash": 6226684754930727000,
"line_mean": 47.7791563275,
"line_max": 79,
"alpha_frac": 0.3827958083,
"autogenerated": false,
"ratio": 3.868162140889414,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.974804330607626,
"avg_score": 0.0005829286226308559,
"num_lines": 403
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import test
class ImagesTagsTest(base.BaseV2ImageTest):

    @test.attr(type='gate')
    def test_update_delete_tags_for_image(self):
        """Tag an image, verify the tag, then untag and verify removal."""
        image = self.create_image(container_format='bare',
                                  disk_format='raw',
                                  visibility='private')
        image_id = image['id']
        self.addCleanup(self.client.delete_image, image_id)
        tag_name = data_utils.rand_name('tag-')
        # Add the tag and confirm the image now carries it.
        self.client.add_image_tag(image_id, tag_name)
        self.assertIn(tag_name, self.client.get_image(image_id)['tags'])
        # Remove the tag and confirm it no longer appears.
        self.client.delete_image_tag(image_id, tag_name)
        self.assertNotIn(tag_name, self.client.get_image(image_id)['tags'])
| {
"repo_name": "CiscoSystems/tempest",
"path": "tempest/api/image/v2/test_images_tags.py",
"copies": "4",
"size": "1462",
"license": "apache-2.0",
"hash": -7385422591357195000,
"line_mean": 36.4871794872,
"line_max": 75,
"alpha_frac": 0.670998632,
"autogenerated": false,
"ratio": 3.9195710455764075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6590569677576408,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import waiters
from tempest.lib.common.utils import data_utils as utils
from tempest.lib import decorators
class QosSpecsTestJSON(base.BaseVolumeAdminTest):
    """Test the Cinder QoS-specs.
    Tests for create, list, delete, show, associate,
    disassociate, set/unset key APIs.
    """
    @classmethod
    def resource_setup(cls):
        # One shared qos-specs instance is created up front and reused by
        # the read-only tests (get/list/set-unset/associate).
        super(QosSpecsTestJSON, cls).resource_setup()
        # Create admin qos client
        # Create a test shared qos-specs for tests
        cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
        cls.qos_consumer = 'front-end'
        cls.created_qos = cls.create_test_qos_specs(cls.qos_name,
                                                    cls.qos_consumer,
                                                    read_iops_sec='2000')
    def _create_delete_test_qos_with_given_consumer(self, consumer):
        """Create a qos-specs with *consumer*, verify it, then delete it."""
        name = utils.rand_name(self.__class__.__name__ + '-qos')
        qos = {'name': name, 'consumer': consumer}
        body = self.create_test_qos_specs(name, consumer)
        for key in ['name', 'consumer']:
            self.assertEqual(qos[key], body[key])
        self.admin_volume_qos_client.delete_qos(body['id'])
        self.admin_volume_qos_client.wait_for_resource_deletion(body['id'])
        # validate the deletion
        list_qos = self.admin_volume_qos_client.list_qos()['qos_specs']
        self.assertNotIn(body, list_qos)
    def _test_associate_qos(self, vol_type_id):
        """Associate the shared qos-specs with the given volume type."""
        self.admin_volume_qos_client.associate_qos(
            self.created_qos['id'], vol_type_id)
    @decorators.idempotent_id('7e15f883-4bef-49a9-95eb-f94209a1ced1')
    def test_create_delete_qos_with_front_end_consumer(self):
        """Tests the creation and deletion of QoS specs
        With consumer as front end
        """
        self._create_delete_test_qos_with_given_consumer('front-end')
    @decorators.idempotent_id('b115cded-8f58-4ee4-aab5-9192cfada08f')
    def test_create_delete_qos_with_back_end_consumer(self):
        """Tests the creation and deletion of QoS specs
        With consumer as back-end
        """
        self._create_delete_test_qos_with_given_consumer('back-end')
    @decorators.idempotent_id('f88d65eb-ea0d-487d-af8d-71f4011575a4')
    def test_create_delete_qos_with_both_consumer(self):
        """Tests the creation and deletion of QoS specs
        With consumer as both front end and back end
        """
        self._create_delete_test_qos_with_given_consumer('both')
    @decorators.idempotent_id('7aa214cc-ac1a-4397-931f-3bb2e83bb0fd')
    def test_get_qos(self):
        """Tests the detail of a given qos-specs"""
        body = self.admin_volume_qos_client.show_qos(
            self.created_qos['id'])['qos_specs']
        self.assertEqual(self.qos_name, body['name'])
        self.assertEqual(self.qos_consumer, body['consumer'])
    @decorators.idempotent_id('75e04226-bcf7-4595-a34b-fdf0736f38fc')
    def test_list_qos(self):
        """Tests the list of all qos-specs"""
        body = self.admin_volume_qos_client.list_qos()['qos_specs']
        self.assertIn(self.created_qos, body)
    @decorators.idempotent_id('ed00fd85-4494-45f2-8ceb-9e2048919aed')
    def test_set_unset_qos_key(self):
        """Test the addition of a specs key to qos-specs"""
        args = {'iops_bytes': '500'}
        body = self.admin_volume_qos_client.set_qos_key(
            self.created_qos['id'],
            iops_bytes='500')['qos_specs']
        self.assertEqual(args, body)
        body = self.admin_volume_qos_client.show_qos(
            self.created_qos['id'])['qos_specs']
        self.assertEqual(args['iops_bytes'], body['specs']['iops_bytes'])
        # test the deletion of a specs key from qos-specs
        keys = ['iops_bytes']
        self.admin_volume_qos_client.unset_qos_key(self.created_qos['id'],
                                                   keys)
        operation = 'qos-key-unset'
        # unset is asynchronous: poll until the key disappears
        waiters.wait_for_qos_operations(self.admin_volume_qos_client,
                                        self.created_qos['id'],
                                        operation, keys)
        body = self.admin_volume_qos_client.show_qos(
            self.created_qos['id'])['qos_specs']
        self.assertNotIn(keys[0], body['specs'])
    @decorators.idempotent_id('1dd93c76-6420-485d-a771-874044c416ac')
    def test_associate_disassociate_qos(self):
        """Test the following operations :
        1. associate_qos
        2. get_association_qos
        3. disassociate_qos
        4. disassociate_all_qos
        """
        # create a test volume-type
        vol_type = []
        for _ in range(0, 3):
            vol_type.append(self.create_volume_type())
        # associate the qos-specs with volume-types
        for i in range(0, 3):
            self._test_associate_qos(vol_type[i]['id'])
        # get the association of the qos-specs
        body = self.admin_volume_qos_client.show_association_qos(
            self.created_qos['id'])['qos_associations']
        associations = [association['id'] for association in body]
        for i in range(0, 3):
            self.assertIn(vol_type[i]['id'], associations)
        # disassociate a volume-type with qos-specs
        self.admin_volume_qos_client.disassociate_qos(
            self.created_qos['id'], vol_type[0]['id'])
        operation = 'disassociate'
        waiters.wait_for_qos_operations(self.admin_volume_qos_client,
                                        self.created_qos['id'], operation,
                                        vol_type[0]['id'])
        # disassociate all volume-types from qos-specs
        self.admin_volume_qos_client.disassociate_all_qos(
            self.created_qos['id'])
        operation = 'disassociate-all'
        waiters.wait_for_qos_operations(self.admin_volume_qos_client,
                                        self.created_qos['id'], operation)
| {
"repo_name": "Juniper/tempest",
"path": "tempest/api/volume/admin/test_qos.py",
"copies": "5",
"size": "6562",
"license": "apache-2.0",
"hash": -6047613392399180000,
"line_mean": 40.2704402516,
"line_max": 78,
"alpha_frac": 0.6110941786,
"autogenerated": false,
"ratio": 3.661830357142857,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6772924535742857,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest.api.image import base
from tempest import test
class ImagesTagsTest(base.BaseV2ImageTest):

    @test.idempotent_id('10407036-6059-4f95-a2cd-cbbbee7ed329')
    def test_update_delete_tags_for_image(self):
        """Tag an image, verify the tag, then untag and verify removal."""
        image = self.create_image(container_format='bare',
                                  disk_format='raw',
                                  visibility='private')
        image_id = image['id']
        self.addCleanup(self.client.delete_image, image_id)
        tag_name = data_utils.rand_name('tag')
        # Add the tag and confirm the image now carries it.
        self.client.add_image_tag(image_id, tag_name)
        self.assertIn(tag_name, self.client.get_image(image_id)['tags'])
        # Remove the tag and confirm it no longer appears.
        self.client.delete_image_tag(image_id, tag_name)
        self.assertNotIn(tag_name, self.client.get_image(image_id)['tags'])
| {
"repo_name": "eggmaster/tempest",
"path": "tempest/api/image/v2/test_images_tags.py",
"copies": "2",
"size": "1502",
"license": "apache-2.0",
"hash": 8282386362508411000,
"line_mean": 36.55,
"line_max": 75,
"alpha_frac": 0.6764314248,
"autogenerated": false,
"ratio": 3.80253164556962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.547896307036962,
"avg_score": null,
"num_lines": null
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.services.qos import base as qos_base
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.tests import _base
# Rule-type -> parameter-validation map handed to the fake QoS driver:
# only minimum-bandwidth rules are supported, restricted to egress.
SUPPORTED_RULES = {
    qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: {
        qos_consts.MIN_KBPS: {'type:values': None},
        qos_consts.DIRECTION: {'type:values': ['egress']}
    }
}
def _make_rule(rule_type='fake-rule-type', params=None):
mock_rule = mock.MagicMock()
mock_rule.rule_type = rule_type
params = params or {}
mock_rule.get = params.get
return mock_rule
def _make_driver(name='fake-driver',
                 vif_types=None,
                 vnic_types=None,
                 supported_rules=SUPPORTED_RULES,
                 requires_rpc_notifications=False):
    """Build a qos_base.DriverBase for the tests.

    Defaults: OVS vif type, 'normal' vnic type, and the module-level
    SUPPORTED_RULES map.

    NOTE: the previous version used mutable list literals as default
    arguments; None sentinels are used instead so the shared defaults
    cannot be mutated across calls. The effective defaults are unchanged.

    :param name: driver name
    :param vif_types: list of supported VIF types (default: [OVS])
    :param vnic_types: list of supported VNIC types (default: [normal])
    :param supported_rules: rule-type validation map
    :param requires_rpc_notifications: passed through to DriverBase
    :returns: a DriverBase instance
    """
    if vif_types is None:
        vif_types = [portbindings.VIF_TYPE_OVS]
    if vnic_types is None:
        vnic_types = [portbindings.VNIC_NORMAL]
    return qos_base.DriverBase(
        name, vif_types, vnic_types, supported_rules,
        requires_rpc_notifications=requires_rpc_notifications)
class TestDriverBase(_base.BaseTestCase):
    """Behavioural checks for qos_base.DriverBase capability queries."""
    def test_is_loaded(self):
        self.assertTrue(_make_driver().is_loaded())
    def test_is_vif_type_compatible(self):
        # The driver is built with OVS only, so bridge must be rejected.
        self.assertTrue(
            _make_driver().is_vif_type_compatible(
                portbindings.VIF_TYPE_OVS))
        self.assertFalse(
            _make_driver().is_vif_type_compatible(
                portbindings.VIF_TYPE_BRIDGE))
    def test_is_vnic_compatible(self):
        # Only the 'normal' vnic type is configured on the fake driver.
        self.assertTrue(
            _make_driver().is_vnic_compatible(portbindings.VNIC_NORMAL))
        self.assertFalse(
            _make_driver().is_vnic_compatible(portbindings.VNIC_BAREMETAL))
    def test_is_rule_supported_with_unsupported_rule(self):
        # The default _make_rule() type is absent from SUPPORTED_RULES.
        self.assertFalse(_make_driver().is_rule_supported(_make_rule()))
    def test_is_rule_supported(self):
        # SUPPORTED_RULES restricts min-bandwidth rules to egress only,
        # so the ingress variant of the same rule must be rejected.
        self.assertTrue(
            _make_driver().is_rule_supported(
                _make_rule(
                    rule_type=qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH,
                    params={qos_consts.MIN_KBPS: None,
                            qos_consts.DIRECTION:
                            constants.EGRESS_DIRECTION})))
        self.assertFalse(
            _make_driver().is_rule_supported(
                _make_rule(
                    rule_type=qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH,
                    params={qos_consts.MIN_KBPS: None,
                            qos_consts.DIRECTION:
                            constants.INGRESS_DIRECTION})))
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/tests/unit/services/qos/test_base.py",
"copies": "1",
"size": "3161",
"license": "apache-2.0",
"hash": -4512112298698193400,
"line_mean": 35.7558139535,
"line_max": 78,
"alpha_frac": 0.6289149003,
"autogenerated": false,
"ratio": 3.9218362282878414,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5050751128587841,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.plugins import constants as p_cons
from neutron_lib.plugins import directory
from oslo_utils import uuidutils
import testtools
from neutron.services.l3_router.service_providers import driver_controller
from neutron.services.l3_router.service_providers import single_node
from neutron.services import provider_configuration
from neutron.tests import base
from neutron.tests.unit import testlib_api
# Core plugin used to initialise the in-memory DB for these tests.
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
class TestDriverController(testlib_api.SqlTestCase):
    def setUp(self):
        """Build a DriverController around a mocked l3 plugin."""
        super(TestDriverController, self).setUp()
        self.setup_coreplugin(DB_PLUGIN_KLASS)
        self.fake_l3 = mock.Mock()
        self.dc = driver_controller.DriverController(self.fake_l3)
        # The controller normally hangs off the plugin; mirror that wiring.
        self.fake_l3.l3_driver_controller = self.dc
        self.ctx = context.get_admin_context()
    def _return_provider_for_flavor(self, provider):
        """Stub the flavor plugin so any flavor resolves to *provider*."""
        self.dc._flavor_plugin_ref = mock.Mock()
        self.dc._flavor_plugin_ref.get_flavor.return_value = {'id': 'abc'}
        provider = {'provider': provider}
        self.dc._flavor_plugin_ref.get_flavor_next_provider.return_value = [
            provider]
    def test_uses_scheduler(self):
        """uses_scheduler mirrors the driver's integrated-scheduler flag."""
        self._return_provider_for_flavor('dvrha')
        router_db = mock.Mock()
        flavor_id = uuidutils.generate_uuid()
        router_id = uuidutils.generate_uuid()
        router = dict(id=router_id, flavor_id=flavor_id)
        self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
                                     self.ctx, router, router_db)
        self.assertTrue(self.dc.uses_scheduler(self.ctx, router_id))
        # Flipping the driver flag must flip the answer for the same router.
        self.dc.drivers['dvrha'].use_integrated_agent_scheduler = False
        self.assertFalse(self.dc.uses_scheduler(self.ctx, router_id))
    def test_driver_owns_router(self):
        """owns_router is true only for the driver a router was bound to."""
        self._return_provider_for_flavor('dvrha')
        router_db = mock.Mock()
        flavor_id = uuidutils.generate_uuid()
        r1 = uuidutils.generate_uuid()
        r2 = uuidutils.generate_uuid()
        # Only r1 is bound (to dvrha); r2 and None are never associated.
        router = dict(id=r1, flavor_id=flavor_id)
        self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
                                     self.ctx, router, router_db)
        self.assertTrue(self.dc.drivers['dvrha'].owns_router(self.ctx, r1))
        self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r1))
        self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, r2))
        self.assertFalse(self.dc.drivers['dvr'].owns_router(self.ctx, None))
    @mock.patch('neutron_lib.callbacks.registry.publish')
    def test__set_router_provider_flavor_specified(self, mock_cb):
        """Flavor-based create publishes the driver-association event."""
        self._return_provider_for_flavor('dvrha')
        router_db = mock.Mock()
        flavor_id = uuidutils.generate_uuid()
        router_id = uuidutils.generate_uuid()
        router = dict(id=router_id, flavor_id=flavor_id)
        self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
                                     self.ctx, router, router_db)
        mock_cb.assert_called_with(resources.ROUTER_CONTROLLER,
                                   events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
                                   payload=mock.ANY)
        # Inspect the payload of the first publish() call.
        payload = mock_cb.mock_calls[0][2]['payload']
        self.assertEqual(router, payload.request_body)
        self.assertEqual(router_db, payload.latest_state)
        self.assertEqual(flavor_id, router_db.flavor_id)
        self.assertEqual(self.dc.drivers['dvrha'],
                         self.dc.get_provider_for_router(self.ctx,
                                                         router_id))
def test__update_router_provider_invalid(self):
test_dc = driver_controller.DriverController(self.fake_l3)
with mock.patch.object(registry, "publish") as mock_cb:
with mock.patch.object(test_dc, "get_provider_for_router"):
with mock.patch.object(
driver_controller,
"_ensure_driver_supports_request") as _ensure:
_ensure.side_effect = lib_exc.InvalidInput(
error_message='message')
self.assertRaises(
lib_exc.InvalidInput,
test_dc._update_router_provider,
None, None, None,
payload=events.DBEventPayload(
None, request_body={'name': 'testname'},
states=({'flavor_id': 'old_fid'},)))
mock_cb.assert_not_called()
def test___attrs_to_driver(self):
test_dc = driver_controller.DriverController(self.fake_l3)
test_dc.default_provider = 'single_node'
self.assertIsInstance(test_dc._attrs_to_driver({}),
single_node.SingleNodeDriver)
test_dc.default_provider = 'ha'
with mock.patch.object(driver_controller,
"_is_driver_compatible", return_value=False):
self.assertRaises(NotImplementedError, test_dc._attrs_to_driver,
{})
def test__update_router_provider_with_flags(self):
test_dc = driver_controller.DriverController(self.fake_l3)
with mock.patch.object(registry, "publish"):
with mock.patch.object(test_dc, "get_provider_for_router"):
with mock.patch.object(
driver_controller,
"_ensure_driver_supports_request") as _ensure:
_ensure.side_effect = lib_exc.InvalidInput(
error_message='message')
with mock.patch(
"neutron.services.l3_router.service_providers."
"driver_controller.LOG.debug") as mock_log:
self.assertRaises(
lib_exc.InvalidInput,
test_dc._update_router_provider,
None, None, None,
payload=events.DBEventPayload(
None, request_body={'name': 'testname',
'distributed': False},
states=({'flavor_id': None,
'distributed': True, 'ha': False},)))
# To validate that the 'ha' attribute of the router
# stays unchanged from the previous state while
# updating 'distributed' from True to False.
mock_log.assert_any_call(
"Get a provider driver handle based on the ha "
"flag: %(ha_flag)s and distributed flag: "
"%(distributed_flag)s",
{'ha_flag': False, 'distributed_flag': False})
@mock.patch('neutron_lib.callbacks.registry.publish')
def test__set_router_provider_attr_lookups(self, mock_cb):
# ensure correct drivers are looked up based on attrs
router_id1 = uuidutils.generate_uuid()
router_id2 = uuidutils.generate_uuid()
router_id3 = uuidutils.generate_uuid()
router_id4 = uuidutils.generate_uuid()
router_id5 = uuidutils.generate_uuid()
router_id6 = uuidutils.generate_uuid()
router_id7 = uuidutils.generate_uuid()
router_id8 = uuidutils.generate_uuid()
router_id9 = uuidutils.generate_uuid()
cases = [
('dvrha', dict(id=router_id1, distributed=True, ha=True)),
('dvr', dict(id=router_id2, distributed=True, ha=False)),
('ha', dict(id=router_id3, distributed=False, ha=True)),
('single_node', dict(id=router_id4, distributed=False,
ha=False)),
('ha', dict(id=router_id5, ha=True,
distributed=constants.ATTR_NOT_SPECIFIED)),
('dvr', dict(id=router_id6, distributed=True,
ha=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id7, ha=False,
distributed=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id8, distributed=False,
ha=constants.ATTR_NOT_SPECIFIED)),
('single_node', dict(id=router_id9,
distributed=constants.ATTR_NOT_SPECIFIED,
ha=constants.ATTR_NOT_SPECIFIED)),
]
for driver, body in cases:
self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
self.ctx, body, mock.Mock())
mock_cb.assert_called_with(
resources.ROUTER_CONTROLLER,
events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
payload=mock.ANY)
self.assertEqual(self.dc.drivers[driver],
self.dc.get_provider_for_router(self.ctx,
body['id']),
'Expecting %s for body %s' % (driver, body))
    @mock.patch('neutron_lib.callbacks.registry.publish')
    def test__clear_router_provider(self, mock_cb):
        """_clear_router_provider drops the router/driver association."""
        # ensure correct drivers are looked up based on attrs
        router_id1 = uuidutils.generate_uuid()
        body = dict(id=router_id1, distributed=True, ha=True)
        self.dc._set_router_provider('router', 'PRECOMMIT_CREATE', self,
                                     self.ctx, body, mock.Mock())
        mock_cb.assert_called_with(resources.ROUTER_CONTROLLER,
                                   events.PRECOMMIT_ADD_ASSOCIATION, mock.ANY,
                                   payload=mock.ANY)
        # The payload of the first publish() call carries the association
        # details: context, old/new driver metadata and the latest state.
        payload = mock_cb.mock_calls[0][2]['payload']
        self.assertEqual(self.ctx, payload.context)
        self.assertIn('old_driver', payload.metadata)
        self.assertIn('new_driver', payload.metadata)
        self.assertIsNotNone(payload.latest_state)
        self.assertEqual(self.dc.drivers['dvrha'],
                         self.dc.get_provider_for_router(self.ctx,
                                                         body['id']))
        self.dc._clear_router_provider('router', 'PRECOMMIT_DELETE', self,
                                       self.ctx, body['id'])
        mock_cb.assert_called_with(resources.ROUTER_CONTROLLER,
                                   events.PRECOMMIT_DELETE_ASSOCIATIONS,
                                   mock.ANY,
                                   payload=mock.ANY)
        with testtools.ExpectedException(ValueError):
            # if association was cleared, get_router will be called
            self.fake_l3.get_router.side_effect = ValueError
            self.dc.get_provider_for_router(self.ctx, body['id'])
            # NOTE(review): the assertion below is unreachable -- the call
            # above raises ValueError first, so this line never executes.
            mock_cb.assert_called_with(resources.ROUTER_CONTROLLER,
                                       events.PRECOMMIT_ADD_ASSOCIATION,
                                       mock.ANY, payload=mock.ANY)
def test__flavor_plugin(self):
directory.add_plugin(p_cons.FLAVORS, mock.Mock())
_dc = driver_controller.DriverController(self.fake_l3)
self.assertEqual(
directory.get_plugin(p_cons.FLAVORS), _dc._flavor_plugin)
class Test_LegacyPlusProviderConfiguration(base.BaseTestCase):
    """Tests for the legacy-plus provider configuration loader."""

    @mock.patch.object(provider_configuration.ProviderConfiguration,
                       "add_provider")
    def test__update_router_provider_invalid(self, mock_add_provider):
        # Construction must not propagate Invalid raised while the
        # in-tree providers are being registered.
        mock_add_provider.side_effect = lib_exc.Invalid(message='message')
        driver_controller._LegacyPlusProviderConfiguration()
| {
"repo_name": "openstack/neutron",
"path": "neutron/tests/unit/services/l3_router/service_providers/test_driver_controller.py",
"copies": "2",
"size": "12414",
"license": "apache-2.0",
"hash": -4246947605663743000,
"line_mean": 49.2591093117,
"line_max": 78,
"alpha_frac": 0.5809569841,
"autogenerated": false,
"ratio": 4.160187667560321,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006169269327164065,
"num_lines": 247
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import logging
import time
import traceback
import yaml
from openstack import connection
from tricircle.tests.network_sdk import network_service
from tricircle.tests.tricircle_sdk import multiregion_network_service
# Module-level logger; basicConfig makes INFO-level task progress visible
# when the runner is executed as a script.
LOG = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Seconds to wait between retries of a failed task (see RunnerEngine._retry).
SLEEP_INTERVAL = 20
class DummyRunner(object):
    """No-op runner that only logs requested operations and hands back
    placeholder resources with predictable sequential ids.
    """

    class DummyResource(object):
        """Stand-in resource: fixed id, any other attribute echoes its
        own name.
        """

        def __init__(self, _id):
            self.id = _id

        def __getattr__(self, item):
            # Every unknown attribute simply evaluates to its name.
            return item

    def __init__(self):
        # per-type counters used to build sequential ids
        self.id_pool = {}

    def _get_id(self, _type):
        """Return the next sequential id for *_type*, e.g. 'net1_id'."""
        self.id_pool.setdefault(_type, 0)
        self.id_pool[_type] += 1
        return '%s%d_id' % (_type, self.id_pool[_type])

    def create(self, region, _type, params):
        new_id = self._get_id(_type)
        LOG.info('create %s with id %s in %s, params: %s' % (
            _type, new_id, region, params))
        return self.DummyResource(new_id)

    def action(self, region, _type, target, method, params):
        LOG.info('%s %s with id %s in %s, params: %s' % (
            method, _type, target, region, params))

    def query(self, region, _type, get_one, params):
        return self.DummyResource(self._get_id(_type)) if get_one else []

    def validate(self, region, _type, predicate, conditions, params):
        LOG.info('validate %s, conditions: %s' % (_type, conditions))
class SDKRunner(object):
    """Runner that performs test tasks against live OpenStack regions
    through openstacksdk connections (one per region).
    """

    # logical region names used in task definitions -> deployed region names
    region_map = {'central': 'CentralRegion',
                  'region1': 'RegionOne',
                  'region2': 'RegionTwo'}
    # service proxy name -> resource types handled by that proxy
    serv_reslist_map = {
        'network_sdk': ['network', 'subnet', 'port', 'router', 'fip', 'trunk',
                        'flow_classifier', 'port_pair', 'port_pair_group',
                        'port_chain', 'qos_policy', 'qos_bandwidth_limit_rule',
                        'qos_dscp_marking_rule', 'qos_minimum_bandwidth_rule'],
        'compute': ['server'],
        'image': ['image'],
        'tricircle_sdk': ['job']}
    # task resource name -> name used by the SDK proxy methods
    res_alias_map = {
        'fip': 'ip'}
    # irregular plural stems; an 's' is appended when listing (see query())
    type_plural_map = {
        'qos_policy': 'qos_policie'}

    def __init__(self, auth_url, project, username, password,
                 project_domain_id, user_domain_id):
        """Create authenticated SDK connections for all three regions."""
        # build the reverse map: resource type -> owning service proxy
        self.res_serv_map = {}
        for serv in self.serv_reslist_map:
            for res in self.serv_reslist_map[serv]:
                self.res_serv_map[res] = serv
        self.connection_map = {}
        auth = {
            'auth_url': auth_url,
            'project_name': project,
            'user_domain_name': 'default',
            'project_domain_name': 'default',
            'username': username,
            'password': password,
            'project_domain_id': project_domain_id,
            'user_domain_id': user_domain_id}
        for region in ('CentralRegion', 'RegionOne', 'RegionTwo'):
            extra_services = []
            if region == 'CentralRegion':
                # only the central region exposes the multi-region
                # (tricircle) API
                serv = multiregion_network_service.MultiregionNetworkService(
                    version='v1')
                extra_services.append(serv)
            net_serv = network_service.NetworkService(version='v2')
            extra_services.append(net_serv)
            conn = connection.Connection(region_name=region,
                                         auth=auth,
                                         extra_services=extra_services)
            conn.config.config['network_sdk_service_type'] = 'network'
            conn.config.config['tricircle_sdk_service_type'] = 'tricircle'
            conn.config.config['network_sdk_api_version'] = 'v2'
            conn.config.config['tricircle_sdk_api_version'] = 'v1'
            for service in extra_services:
                conn.add_service(service)
            self.connection_map[region] = conn

    def _get_proxy(self, region, _type):
        """Return (service proxy, SDK resource name) for *_type* in *region*.

        The extra services registered in __init__ may expose the proxy as
        a descriptor object, hence the __get__ fallback.
        """
        conn = self.connection_map[self.region_map[region]]
        serv = self.res_serv_map[_type]
        _type = self.res_alias_map.get(_type, _type)
        desc = getattr(conn, serv)
        try:
            proxy = desc.__get__(conn, '')
        except Exception:
            proxy = desc
        return proxy, _type

    def create(self, region, _type, params):
        """Create one resource of *_type* in *region* and return it."""
        proxy, _type = self._get_proxy(region, _type)
        return getattr(proxy, 'create_%s' % _type)(**params)

    def action(self, region, _type, target, method, params):
        """Invoke *method* on resource *target*.

        'update' and 'delete' are expanded to the type-specific proxy
        method names; any other method name is called as-is.
        """
        proxy, _type = self._get_proxy(region, _type)
        if method in ('update', 'delete'):
            method = '%s_%s' % (method, _type)
        getattr(proxy, method)(target, **params)

    def query(self, region, _type, get_one, params):
        """List resources of *_type*; return only the first match when
        *get_one* is true.
        """
        proxy, _type = self._get_proxy(region, _type)
        _type = self.type_plural_map.get(_type, _type)
        _list = list(getattr(proxy, '%ss' % _type)(**params))
        if get_one:
            return _list[0]
        return _list

    def validate(self, region, _type, predicate, conditions, params):
        """Query resources and check *conditions* against the results.

        predicate 'any': each condition must match at least one result;
        predicate 'all': each condition must match every result.
        Raises Exception when a condition is not satisfied.
        """
        def validate_value(actual, expected):
            # Lists match when some pairing of permutations matches
            # element-wise; dicts are compared on the expected keys only;
            # strings support a '*' wildcard at either end.
            if isinstance(expected, list):
                actual_len = len(actual)
                expected_len = len(expected)
                if actual_len != expected_len:
                    return False
                for actual_list in itertools.permutations(actual, actual_len):
                    for expected_list in itertools.permutations(expected,
                                                                expected_len):
                        match = True
                        for i, actual_ele in enumerate(actual_list):
                            if not validate_value(actual_ele,
                                                  expected_list[i]):
                                match = False
                                break
                        if match:
                            return True
                return False
            elif isinstance(expected, dict):
                for k in expected:
                    if not validate_value(actual[k], expected[k]):
                        return False
                return True
            elif isinstance(expected, str):
                # NOTE(review): an empty expected string would raise
                # IndexError on tokens[1]; presumably task files never use
                # '' here -- confirm before relying on it.
                tokens = expected.split('*')
                if tokens[0] == '' and tokens[-1] == '':
                    return actual.find(tokens[1]) != -1
                elif tokens[0] == '':
                    return actual.endswith(tokens[-1])
                elif tokens[-1] == '':
                    return actual.startswith(tokens[0])
                return actual == expected
            else:
                return actual == expected

        def validate_any_condition(results, condition):
            # True if at least one result satisfies every key of condition
            for result in results:
                if all(validate_value(
                        getattr(result, key),
                        value) for (key, value) in condition.items()):
                    return True
            return False

        def validate_all_condition(results, condition):
            # True only if every result satisfies every key of condition
            for result in results:
                if not all(validate_value(
                        getattr(result, key),
                        value) for (key, value) in condition.items()):
                    return False
            return True

        results = self.query(region, _type, False, params)
        if predicate == 'any':
            for condition in conditions:
                if not validate_any_condition(results, condition):
                    # fixed typo: "acutal" -> "actual"
                    raise Exception(
                        'Validation fail, actual results: %s, '
                        'expected results: %s' % (results, condition))
        elif predicate == 'all':
            for condition in conditions:
                if not validate_all_condition(results, condition):
                    # fixed typo: "acutal" -> "actual"
                    raise Exception(
                        'Validation fail, actual results: %s, '
                        'expected results: %s' % (results, condition))
class RunnerEngine(object):
    """Loads task sets from a YAML file, resolves their dependencies and
    executes them through the given runner (SDKRunner or DummyRunner).
    """

    def __init__(self, yaml_path, runner):
        # task_set_id -> (task_map, ordered_task_list, depend set)
        self.task_set_map = {}
        # task set ids in file order (execution order, see run_task_sets)
        self.task_set_id_list = []
        self.runner = runner
        with open(yaml_path) as f:
            data = yaml.safe_load(f)
        self._parse_data(data)

    def _validate_task(self, task):
        """Check required fields and that every cross-task reference is
        covered by the task's (or task set's) depend list.

        Raises Exception on the first violation found.
        """
        def collect_requires(requires, value):
            # Recursively gather strings containing '@' (reference syntax
            # 'task_id@field' or 'task_set_id@task_id@field').
            if isinstance(value, list):
                for v in value:
                    collect_requires(requires, v)
            elif isinstance(value, dict):
                for v in value.values():
                    collect_requires(requires, v)
            elif isinstance(value, str) and '@' in value:
                requires.append(value)

        for field in ('task_id', 'region', 'type'):
            if field not in task:
                raise Exception('Required field %s not set' % field)
        for sub_section, fields in [('action', ['target', 'method']),
                                    ('query', ['get_one']),
                                    ('validate', ['predicate', 'condition'])]:
            if sub_section in task:
                for field in fields:
                    if field not in task[sub_section]:
                        raise Exception('Required field %s for %s '
                                        'not set' % (field, sub_section))
        requires = []
        if 'params' in task:
            collect_requires(requires, task['params'])
        if 'validate' in task:
            collect_requires(requires, task['validate'])
        if 'action' in task:
            # the action target is always a reference to another task
            requires.append(task['action']['target'])
        depend = task.get('depend', [])
        for value in requires:
            tokens = value.split('@')
            if len(tokens) == 2 and tokens[0] not in depend:
                raise Exception(
                    'Depend list not complete for %s: %s not in %s' % (
                        task['task_id'], tokens[0], depend))
            elif len(tokens) == 3:
                task_set_id, task_id = tokens[:2]
                if task_set_id not in self.task_set_map:
                    raise Exception(
                        'Depend task set %s for %s not found' % (
                            task_set_id, task['task_id']))
                task_map, _, _ = self.task_set_map[task_set_id]
                if task_id not in task_map:
                    raise Exception(
                        'Depend task %s for %s not found' % (
                            task_id, task['task_id']))

    @staticmethod
    def _parse_dependency(depend_map):
        """Topologically sort the ids in *depend_map*.

        :param depend_map: {id: set of ids it depends on}
        :returns: ids ordered so that dependencies come first
        :raises Exception: on circular/unresolvable dependencies
        """
        depend_map = copy.deepcopy(depend_map)
        ordered_list = []
        while depend_map:
            pop_list = []
            for _id in depend_map:
                if not depend_map[_id]:
                    # no outstanding dependencies, ready to run
                    ordered_list.append(_id)
                    pop_list.append(_id)
            for _id in pop_list:
                depend_map.pop(_id)
            # drop satisfied dependencies from the remaining entries
            for depend in depend_map.values():
                for _id in pop_list:
                    if _id in depend:
                        depend.remove(_id)
            if not pop_list:
                # nothing became ready in a full pass: dependency cycle
                # (fixed garbled message 'left s: %s')
                raise Exception('Unresolved dependency, '
                                'left: %s' % depend_map.keys())
        return ordered_list

    def _parse_data(self, data):
        """Validate and order all task sets and their tasks from *data*."""
        task_set_depend_map = {}
        task_set_tasks_map = {}
        for task_set in data:
            task_set_id = task_set['task_set_id']
            self.task_set_id_list.append(task_set_id)
            task_set_depend_map[task_set_id] = set(
                task_set.get('depend', []))
            task_set_tasks_map[task_set_id] = task_set['tasks']
        ordered_task_set_list = self._parse_dependency(task_set_depend_map)
        for task_set_id in ordered_task_set_list:
            task_map = {}
            task_depend_map = {}
            for task in task_set_tasks_map[task_set_id]:
                task_map[task['task_id']] = task
                task_depend_map[task['task_id']] = set(task.get('depend', []))
                self._validate_task(task)
            ordered_task_list = self._parse_dependency(task_depend_map)
            self.task_set_map[task_set_id] = (task_map, ordered_task_list,
                                              task_set_depend_map[task_set_id])

    @staticmethod
    def _fill_depend_field_in_list(_list, task_result_map,
                                   depend_task_result_map):
        """Replace 'task@field' / 'set@task@field' references found in
        *_list* (recursively) with attributes of earlier task results.
        """
        if not _list:
            return
        for i, e in enumerate(_list):
            if isinstance(e, list):
                RunnerEngine._fill_depend_field_in_list(e, task_result_map,
                                                        depend_task_result_map)
            elif isinstance(e, dict):
                RunnerEngine._fill_depend_filed_in_dict(e, task_result_map,
                                                        depend_task_result_map)
            if not isinstance(e, str):
                continue
            tokens = e.split('@')
            if len(tokens) == 2:
                task_id, task_field = tokens
                _list[i] = getattr(task_result_map[task_id], task_field)
            elif len(tokens) == 3:
                task_set_id, task_id, task_field = tokens
                _list[i] = getattr(
                    depend_task_result_map[task_set_id][task_id], task_field)

    @staticmethod
    def _fill_depend_filed_in_dict(_dict, task_result_map,
                                   depend_task_result_map):
        """Dict counterpart of _fill_depend_field_in_list.

        NOTE: 'filed' is a historical typo in the method name, kept to
        avoid breaking callers outside this chunk.
        """
        if not _dict:
            return
        for k, v in _dict.items():
            if isinstance(v, list):
                RunnerEngine._fill_depend_field_in_list(v, task_result_map,
                                                        depend_task_result_map)
            elif isinstance(v, dict):
                RunnerEngine._fill_depend_filed_in_dict(v, task_result_map,
                                                        depend_task_result_map)
            if not isinstance(v, str):
                continue
            tokens = v.split('@')
            if len(tokens) == 2:
                task_id, task_field = tokens
                _dict[k] = getattr(task_result_map[task_id], task_field)
            elif len(tokens) == 3:
                task_set_id, task_id, task_field = tokens
                _dict[k] = getattr(
                    depend_task_result_map[task_set_id][task_id], task_field)

    @staticmethod
    def _fill_depend_field(params, task_result_map, depend_task_result_map):
        """Entry point: fill references in a params dict in place."""
        RunnerEngine._fill_depend_filed_in_dict(params, task_result_map,
                                                depend_task_result_map)

    @staticmethod
    def _retry(task_id, retry_num, func, *args):
        """Run func(*args), retrying up to *retry_num* extra times with
        SLEEP_INTERVAL seconds between attempts; re-raise the last failure.
        """
        run_time = retry_num + 1
        for i in range(run_time):
            try:
                func(*args)
                break
            except Exception:
                if i == run_time - 1:
                    # out of retries, propagate the last failure
                    raise
                time.sleep(SLEEP_INTERVAL)
                LOG.info('Redo failed task %s', task_id)

    def run_tasks(self, task_set_id, depend_task_set_result=None):
        """Run every task of *task_set_id* in dependency order.

        :param depend_task_set_result: {task_set_id: task_result_map} for
            the task sets this one depends on
        :returns: (task_result_map, error_msg); error_msg is None on
            success, otherwise a message with the failing traceback
        """
        # default changed from a shared mutable {} literal to None per the
        # standard Python idiom; behavior is unchanged
        if depend_task_set_result is None:
            depend_task_set_result = {}
        if task_set_id not in self.task_set_map:
            raise Exception('Task set %s not found' % task_set_id)
        (task_map, ordered_task_list,
         task_set_depend) = self.task_set_map[task_set_id]
        for set_id in task_set_depend:
            if set_id not in depend_task_set_result:
                raise Exception('Task set %s fails, reason: result for depend '
                                'task set %s not given' % (task_set_id,
                                                           set_id))
        task_result_map = {}
        for task_id in ordered_task_list:
            task = task_map[task_id]
            params = task.get('params', {})
            self._fill_depend_field(params, task_result_map,
                                    depend_task_set_result)
            try:
                if 'action' in task:
                    self._fill_depend_field(task['action'], task_result_map,
                                            depend_task_set_result)
                    self._retry(task_id, task['action'].get('retries', 0),
                                self.runner.action, task['region'],
                                task['type'], task['action']['target'],
                                task['action']['method'], params)
                elif 'query' in task:
                    result = self.runner.query(
                        task['region'], task['type'],
                        task['query']['get_one'], params)
                    task_result_map[task_id] = result
                elif 'validate' in task:
                    self._fill_depend_field(task['validate'], task_result_map,
                                            depend_task_set_result)
                    self._retry(task_id, task['validate'].get('retries', 0),
                                self.runner.validate, task['region'],
                                task['type'], task['validate']['predicate'],
                                task['validate']['condition'], params)
                else:
                    # a task without a sub-section is a create request
                    result = self.runner.create(task['region'],
                                                task['type'], params)
                    task_result_map[task_id] = result
                LOG.info('Task %s done\n', task_id)
            except Exception:
                error_msg = 'Task %s fails, reason: %s' % (
                    task_id, traceback.format_exc())
                return task_result_map, error_msg
        return task_result_map, None

    def run_task_sets(self):
        """Run all task sets in file order.

        NOTE(review): assumes the YAML lists task sets in an order
        consistent with their dependencies; otherwise the result lookup
        below raises KeyError -- confirm with the task files in use.

        :returns: an error message on the first failure, None otherwise
        """
        task_set_result_map = {}
        for task_set_id in self.task_set_id_list:
            _, _, task_set_depend = self.task_set_map[task_set_id]
            depend_task_set_result = dict(
                [(_id, task_set_result_map[_id]) for _id in task_set_depend])
            task_result_map, error_msg = self.run_tasks(
                task_set_id, depend_task_set_result)
            if error_msg:
                return error_msg
            task_set_result_map[task_set_id] = task_result_map
| {
"repo_name": "stackforge/tricircle",
"path": "tricircle/tempestplugin/task_runner.py",
"copies": "1",
"size": "19578",
"license": "apache-2.0",
"hash": 8764798303787939000,
"line_mean": 40.9229122056,
"line_max": 79,
"alpha_frac": 0.4931555828,
"autogenerated": false,
"ratio": 4.249620143260256,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242775726060256,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
import ddt
from rally import exceptions
from rally.plugins.task.exporters.elastic import exporter as elastic
from tests.unit import test
# Import path of the module under test; used to build mock.patch targets.
PATH = "rally.plugins.task.exporters.elastic.exporter"
class ValidatorTestCase(test.TestCase):
    """Unit tests for the ElasticSearch exporter validator."""

    @mock.patch("%s.client.ElasticSearchClient" % PATH)
    def test_validate(self, mock_es_client):
        validator = elastic.Validator()
        client = mock_es_client.return_value

        def run_validate(destination):
            validator.validate({}, {}, None, {"destination": destination})

        # a file destination needs no ElasticSearch client at all
        run_validate("/home/foo")
        self.assertFalse(mock_es_client.called)
        # supported server versions pass validation
        for version in ("2.5.1", "5.6.2"):
            client.version.return_value = version
            run_validate(None)
        # an unsupported version is rejected with a clear message
        client.version.return_value = "1.1.1"
        e = self.assertRaises(
            elastic.validation.ValidationError,
            validator.validate, {}, {}, None, {"destination": None})
        self.assertEqual("The unsupported version detected 1.1.1.",
                         e.message)
        # client errors are wrapped into a validation error
        exp_e = exceptions.RallyException("foo")
        client.version.side_effect = exp_e
        actual_e = self.assertRaises(
            elastic.validation.ValidationError,
            validator.validate, {}, {}, None, {"destination": None})
        self.assertEqual(exp_e.format_message(), actual_e.message)
def get_tasks_results():
    """Return a canned one-task result list mimicking Rally's task output.

    The single task holds one subtask with one workload and two failed
    iterations (one whose failing atomic action is not marked, one where
    the action carries ``"failed": True``); used as a fixture by the
    exporter tests below.
    """
    task_uuid = "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8"
    subtask_uuid = "35166362-0b11-4e74-929d-6988377e2da2"
    return [{
        "id": 1,
        "uuid": task_uuid,
        "deployment_uuid": "deployment-uuu-iii-iii-ddd",
        "env_uuid": "deployment-uuu-iii-iii-ddd",
        "env_name": "env-name",
        "title": "foo",
        "description": "bar",
        "status": "ok",
        "pass_sla": "yup",
        "task_duration": "dur",
        "tags": ["tag-1", "tag-2"],
        "subtasks": [{
            "task_uuid": task_uuid,
            "subtask_uuid": subtask_uuid,
            "workloads": [{
                "id": 3,
                "position": 0,
                "uuid": "4dcd88a5-164b-4431-8b44-3979868116dd",
                "task_uuid": task_uuid,
                "subtask_uuid": subtask_uuid,
                "name": "CinderVolumes.list_volumes",
                "args": {"key1": "value1"},
                "description": "List all volumes.",
                "runner_type": "constant",
                "runner": {"type": "constant",
                           "times": 3},
                "sla": {},
                "contexts": {"users@openstack": {"tenants": 2}},
                "created_at": "2017-07-28T23:35:46",
                "updated_at": "2017-07-28T23:37:55",
                "start_time": 1501284950.371992,
                "failed_iteration_count": 2,
                "load_duration": 97.82577991485596,
                "full_duration": 127.59103488922119,
                "pass_sla": False,
                "sla_results": {
                    "sla": [{"criterion": "JoGo",
                             "success": False,
                             "detail": "because i do not like you"}]
                },
                "statistics": {
                    "durations": {
                        "total": {
                            "data": {"success": "80.0%"}
                        }
                    }
                },
                "data": [
                    # iteration where the unwrapped action failed
                    {"timestamp": 1501284950.371992,
                     "error": ["ERROR!"],
                     "duration": 10.096552848815918,
                     "idle_duration": 0,
                     "atomic_actions": [
                         {"finished_at": 1501284961.468537,
                          "started_at": 1501284950.372052,
                          "name": "cinder.list_volumes",
                          "children": []}]},
                    # iteration where the known action failed
                    {"timestamp": 1501284950.371992,
                     "error": ["ERROR!"],
                     "duration": 10.096552848815918,
                     "idle_duration": 0,
                     "atomic_actions": [
                         {"finished_at": 1501284961.468537,
                          "started_at": 1501284950.372052,
                          "name": "cinder.list_volumes",
                          "failed": True,
                          "children": []}]}
                ]}]
        }]
    }]
@ddt.ddt
class ElasticSearchExporterTestCase(test.TestCase):
def setUp(self):
super(ElasticSearchExporterTestCase, self).setUp()
self.patcher = mock.patch.object(elastic.client, "ElasticSearchClient")
self.es_cls = self.patcher.start()
self.addCleanup(self.patcher.stop)
def test_init(self):
exporter = elastic.ElasticSearchExporter([], "http://example.com")
self.assertTrue(exporter._remote)
self.assertEqual(self.es_cls.return_value,
getattr(exporter, "_client"))
exporter = elastic.ElasticSearchExporter([], None)
self.assertTrue(exporter._remote)
self.assertEqual(self.es_cls.return_value,
getattr(exporter, "_client"))
exporter = elastic.ElasticSearchExporter([], "/foo/bar")
self.assertFalse(exporter._remote)
self.assertIsNone(getattr(exporter, "_client", None))
@ddt.data(None, "/home/bar", "https://example.com")
def test__add_index(self, destination):
index = "foo"
doc_type = "bar"
body = {
"key1": "value1",
"key2": "value2"
}
doc_id = "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8"
exporter = elastic.ElasticSearchExporter([], destination)
exporter._add_index(index=index,
body=body,
doc_id=doc_id,
doc_type=doc_type)
self.assertEqual(2, len(exporter._report))
self.assertEqual({"index": {"_index": index,
"_type": doc_type,
"_id": doc_id}},
json.loads(exporter._report[0]))
    @ddt.data(True, False)
    @mock.patch("%s.ElasticSearchExporter._add_index" % PATH)
    def test__process_atomic_actions(self, known_fail, mock__add_index):
        """Atomic actions (and their children) become index documents.

        With known_fail=True the failing action is explicitly marked via
        'failed'; with known_fail=False the failure is unattributed and a
        synthetic 'no-name-action' document is appended instead.
        """
        es_exporter = elastic.ElasticSearchExporter({}, None)
        itr_data = {"id": "foo_bar_uuid",
                    "error": ["I was forced to fail. Sorry"],
                    "timestamp": 1, "duration": 2, "idle_duration": 1}
        workload = {
            "scenario_cfg": ["key1=value1"],
            "runner_name": "foo",
            "runner_cfg": ["times=3"],
            "contexts": ["users@openstack.tenants=2"],
            "deployment_uuid": "dep_uuid", "deployment_name": "dep_name"}
        # two top-level actions; the second one has a nested child
        atomic_actions = [
            {"name": "do_something",
             "started_at": 1, "finished_at": 2,
             "children": []},
            {"name": "fail_something",
             "started_at": 3,
             "finished_at": 4,
             "children": [
                 {"name": "rm -rf", "started_at": 3, "finished_at": 4,
                  "children": []}
             ]},
        ]
        if known_fail:
            atomic_actions[-1]["failed"] = True
            atomic_actions[-1]["children"][-1]["failed"] = True
        es_exporter._process_atomic_actions(
            atomic_actions=atomic_actions, itr=itr_data,
            workload_id="wid", workload=workload)
        # Order matters: documents are emitted depth-first in action order.
        expected_calls = [
            mock.call(
                "rally_atomic_action_data_v1",
                {
                    "deployment_uuid": "dep_uuid",
                    "deployment_name": "dep_name",
                    "action_name": "do_something",
                    "scenario_cfg": ["key1=value1"],
                    "contexts": ["users@openstack.tenants=2"],
                    "runner_name": "foo",
                    "runner_cfg": ["times=3"],
                    "started_at": "1970-01-01T00:00:01",
                    "finished_at": "1970-01-01T00:00:02",
                    "duration": 1,
                    "success": True,
                    "error": None,
                    "parent": None,
                    "workload_uuid": "wid"},
                doc_id="foo_bar_uuid_action_do_something_0"),
            mock.call(
                "rally_atomic_action_data_v1",
                {
                    "deployment_uuid": "dep_uuid",
                    "deployment_name": "dep_name",
                    "action_name": "fail_something",
                    "scenario_cfg": ["key1=value1"],
                    "contexts": ["users@openstack.tenants=2"],
                    "runner_name": "foo",
                    "runner_cfg": ["times=3"],
                    "started_at": "1970-01-01T00:00:03",
                    "finished_at": "1970-01-01T00:00:04",
                    "duration": 1,
                    "success": not known_fail,
                    "error": itr_data["error"] if known_fail else None,
                    "parent": None,
                    "workload_uuid": "wid"},
                doc_id="foo_bar_uuid_action_fail_something_0"),
            mock.call(
                "rally_atomic_action_data_v1",
                {
                    "deployment_uuid": "dep_uuid",
                    "deployment_name": "dep_name",
                    "action_name": "rm -rf",
                    "scenario_cfg": ["key1=value1"],
                    "contexts": ["users@openstack.tenants=2"],
                    "runner_name": "foo",
                    "runner_cfg": ["times=3"],
                    "started_at": "1970-01-01T00:00:03",
                    "finished_at": "1970-01-01T00:00:04",
                    "duration": 1,
                    "success": not known_fail,
                    "error": itr_data["error"] if known_fail else None,
                    "parent": "foo_bar_uuid_action_fail_something_0",
                    "workload_uuid": "wid"},
                doc_id="foo_bar_uuid_action_rm -rf_0")]
        if not known_fail:
            # the unattributed error produces an extra synthetic document
            expected_calls.append(mock.call(
                "rally_atomic_action_data_v1",
                {
                    "deployment_uuid": "dep_uuid",
                    "deployment_name": "dep_name",
                    "action_name": "no-name-action",
                    "scenario_cfg": ["key1=value1"],
                    "contexts": ["users@openstack.tenants=2"],
                    "runner_name": "foo",
                    "runner_cfg": ["times=3"],
                    "started_at": "1970-01-01T00:00:04",
                    "finished_at": "1970-01-01T00:00:04",
                    "duration": 0,
                    "success": False,
                    "error": itr_data["error"],
                    "parent": None,
                    "workload_uuid": "wid"},
                doc_id="foo_bar_uuid_action_no-name-action_0"))
        self.assertEqual(expected_calls, mock__add_index.call_args_list)
def test_generate_fails_on_doc_exists(self):
destination = "http://example.com"
client = self.es_cls.return_value
client.check_document.side_effect = (False, True)
tasks = get_tasks_results()
second_task = copy.deepcopy(tasks[-1])
second_task["subtasks"] = []
tasks.append(second_task)
exporter = elastic.ElasticSearchExporter(tasks, destination)
e = self.assertRaises(exceptions.RallyException, exporter.generate)
self.assertIn("Failed to push the task %s" % tasks[0]["uuid"],
e.format_message())
def test__ensure_indices(self):
es = mock.MagicMock()
exporter = elastic.ElasticSearchExporter([], None)
exporter._client = es
# case #1: everything exists
es.list_indices.return_value = [exporter.WORKLOAD_INDEX,
exporter.TASK_INDEX,
exporter.AA_INDEX]
exporter._ensure_indices()
self.assertFalse(es.create_index.called)
es.list_indices.assert_called_once_with()
# case #2: some indices exist
es.list_indices.reset_mock()
es.list_indices.return_value = [exporter.TASK_INDEX, exporter.AA_INDEX]
exporter._ensure_indices()
es.list_indices.assert_called_once_with()
es.create_index.assert_called_once_with(
exporter.WORKLOAD_INDEX, doc_type="data",
properties=exporter.INDEX_SCHEMAS[exporter.WORKLOAD_INDEX]
)
# case #3: none of indices exists
es.list_indices.reset_mock()
es.create_index.reset_mock()
es.list_indices.return_value = []
exporter._ensure_indices()
es.list_indices.assert_called_once_with()
self.assertEqual(3, es.create_index.call_count)
    @ddt.data(True, False)
    def test_generate(self, remote):
        """End-to-end check of generate() for remote and file destinations.

        With ``remote=True`` documents are pushed to ElasticSearch; with
        ``remote=False`` they are written to a local file. In both cases the
        internal report must contain the expected bulk-index payload.
        """
        if remote:
            destination = "http://example.com"
            client = self.es_cls.return_value
            # No document exists yet, so every push should succeed.
            client.check_document.return_value = False
        else:
            destination = "/home/bar.txt"
        tasks = get_tasks_results()
        second_task = copy.deepcopy(tasks[-1])
        second_task["subtasks"] = []
        tasks.append(second_task)
        exporter = elastic.ElasticSearchExporter(tasks, destination)
        result = exporter.generate()
        if remote:
            # Both tasks share the same uuid (the second is a deepcopy).
            self.assertEqual(
                [mock.call("rally_task_data_v1", second_task["uuid"]),
                 mock.call("rally_task_data_v1", second_task["uuid"])],
                client.check_document.call_args_list
            )
            client.push_documents.assert_called_once_with(exporter._report)
            client.list_indices.assert_called_once_with()
            self.assertEqual(3, client.create_index.call_count)
        else:
            self.assertEqual({"files", "open"}, set(result.keys()))
            self.assertEqual("file://%s" % destination, result["open"])
            self.assertEqual({destination}, set(result["files"].keys()))
            data = result["files"][destination].split("\n")
            # there should always be an empty line at the end
            self.assertEqual("", data[-1])
        data = [json.loads(line) for line in exporter._report]
        self.assertIsInstance(data, list)
        # Bulk format: alternating index-action and document lines.
        expected = [
            {
                "index": {"_id": "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8",
                          "_index": "rally_task_data_v1",
                          "_type": "data"}
            },
            {
                "title": "foo",
                "description": "bar",
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "status": "ok",
                "pass_sla": "yup",
                "task_uuid": "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8",
                "tags": ["tag-1", "tag-2"]
            },
            {
                "index": {"_id": "4dcd88a5-164b-4431-8b44-3979868116dd",
                          "_index": "rally_workload_data_v1",
                          "_type": "data"}
            },
            {
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "task_uuid": "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8",
                "subtask_uuid": "35166362-0b11-4e74-929d-6988377e2da2",
                "scenario_name": "CinderVolumes.list_volumes",
                "description": "List all volumes.",
                "scenario_cfg": ["key1=value1"],
                "contexts": ["users@openstack.tenants=2"],
                "runner_name": "constant",
                "runner_cfg": ["times=3", "type=constant"],
                "full_duration": 127.59103488922119,
                "load_duration": 97.82577991485596,
                "started_at": "2017-07-28T23:35:50",
                "pass_sla": False,
                "success_rate": 0.8,
                "sla_details": ["because i do not like you"]
            },
            {
                # implicit string concatenation builds the composite doc id
                "index": {
                    "_id": "4dcd88a5-164b-4431-8b44-3979868116dd_iter_1_action"
                           "_cinder.list_volumes_0",
                    "_index": "rally_atomic_action_data_v1",
                    "_type": "data"}
            },
            {
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "action_name": "cinder.list_volumes",
                "started_at": "2017-07-28T23:35:50",
                "finished_at": "2017-07-28T23:36:01",
                "duration": 11.096485137939453,
                "contexts": ["users@openstack.tenants=2"],
                "error": None,
                "parent": None,
                "runner_name": "constant",
                "runner_cfg": ["times=3", "type=constant"],
                "scenario_cfg": ["key1=value1"],
                "success": True,
                "workload_uuid": "4dcd88a5-164b-4431-8b44-3979868116dd"
            },
            {
                "index": {
                    "_id": "4dcd88a5-164b-4431-8b44-3979868116dd_iter_1_action"
                           "_no-name-action_0",
                    "_index": "rally_atomic_action_data_v1",
                    "_type": "data"}
            },
            {
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "action_name": "no-name-action",
                "started_at": "2017-07-28T23:36:00",
                "finished_at": "2017-07-28T23:36:00",
                "duration": 0,
                "contexts": ["users@openstack.tenants=2"],
                "error": ["ERROR!"],
                "parent": None,
                "runner_name": "constant",
                "runner_cfg": ["times=3", "type=constant"],
                "scenario_cfg": ["key1=value1"],
                "success": False,
                "workload_uuid": "4dcd88a5-164b-4431-8b44-3979868116dd"
            },
            {
                "index": {
                    "_id": "4dcd88a5-164b-4431-8b44-3979868116dd_iter_2_action"
                           "_cinder.list_volumes_0",
                    "_index": "rally_atomic_action_data_v1",
                    "_type": "data"}
            },
            {
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "action_name": "cinder.list_volumes",
                "started_at": "2017-07-28T23:35:50",
                "finished_at": "2017-07-28T23:36:01",
                "duration": 11.096485137939453,
                "contexts": ["users@openstack.tenants=2"],
                "error": ["ERROR!"],
                "parent": None,
                "runner_name": "constant",
                "runner_cfg": ["times=3", "type=constant"],
                "scenario_cfg": ["key1=value1"],
                "success": False,
                "workload_uuid": "4dcd88a5-164b-4431-8b44-3979868116dd"
            },
            {
                "index": {"_id": "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8",
                          "_index": "rally_task_data_v1",
                          "_type": "data"}
            },
            {
                "deployment_uuid": "deployment-uuu-iii-iii-ddd",
                "deployment_name": "env-name",
                "title": "foo",
                "description": "bar",
                "status": "ok",
                "pass_sla": "yup",
                "task_uuid": "2fa4f5ff-7d23-4bb0-9b1f-8ee235f7f1c8",
                "tags": ["tag-1", "tag-2"]}
        ]
        # NOTE(review): this loop detects *missing* report lines but not
        # *extra* trailing ones — consider also asserting
        # len(expected) == len(data).
        for i, line in enumerate(expected):
            if i == len(data):
                self.fail("The next line is missed: %s" % line)
            self.assertEqual(line, data[i], "Line #%s is wrong." % (i + 1))
| {
"repo_name": "openstack/rally",
"path": "tests/unit/plugins/task/exporters/elastic/test_exporter.py",
"copies": "1",
"size": "20745",
"license": "apache-2.0",
"hash": -7113831106247617000,
"line_mean": 39.2815533981,
"line_max": 79,
"alpha_frac": 0.4803567125,
"autogenerated": false,
"ratio": 3.896506386175808,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48768630986758077,
"avg_score": null,
"num_lines": null
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron_lib.api.definitions import segment
from neutron_lib.api.definitions import subnet
# The alias of the extension.
ALIAS = 'subnet-segmentid-writable'
# Whether or not this extension is simply signaling behavior to the user
# or it actively modifies the attribute map.
IS_SHIM_EXTENSION = False
# Whether the extension is marking the adoption of standardattr model for
# legacy resources, or introducing new standardattr attributes. False or
# None if the standardattr model is adopted since the introduction of
# resource extension.
# If this is True, the alias for the extension should be prefixed with
# 'standard-attr-'.
IS_STANDARD_ATTR_EXTENSION = False
# The name of the extension.
NAME = 'Subnet SegmentID (writable)'
# A prefix for API resources. An empty prefix means that the API is going
# to be exposed at the v2/ level as any other core resource.
API_PREFIX = ''
# The description of the extension.
DESCRIPTION = (
    "Provides a writable segment_id attribute for a subnet resource.")
# A timestamp of when the extension was introduced.
UPDATED_TIMESTAMP = "2018-03-12T00:00:00-00:00"
# Start from the segment_id attribute definition shipped with the 'segment'
# extension and make it writable (allow_put=True). The deep copy keeps the
# segment extension's own attribute map untouched.
segment_id_attr_info = copy.deepcopy(
    segment.RESOURCE_ATTRIBUTE_MAP[
        subnet.COLLECTION_NAME][segment.SEGMENT_ID])
segment_id_attr_info['allow_put'] = True
# The resource attribute map for the extension.
RESOURCE_ATTRIBUTE_MAP = {
    subnet.COLLECTION_NAME: {
        segment.SEGMENT_ID: segment_id_attr_info
    }
}
# The subresource attribute map for the extension. It adds child resources
# to main extension's resource. The subresource map must have a parent and
# a parameters entry. If an extension does not need such a map, None can
# be specified (mandatory).
SUB_RESOURCE_ATTRIBUTE_MAP = {}
# The action map: it associates verbs with methods to be performed on
# the API resource.
ACTION_MAP = {}
# The action status.
ACTION_STATUS = {
}
# The list of required extensions.
REQUIRED_EXTENSIONS = [segment.ALIAS]
# The list of optional extensions.
OPTIONAL_EXTENSIONS = []
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/api/definitions/subnet_segmentid_writable.py",
"copies": "1",
"size": "2583",
"license": "apache-2.0",
"hash": -3217354405545578500,
"line_mean": 30.8888888889,
"line_max": 78,
"alpha_frac": 0.7448703058,
"autogenerated": false,
"ratio": 3.849478390461997,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5094348696261997,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_common
from neutron.db import db_base_plugin_v2
from neutron.extensions import portbindings
from neutron.objects import base as objects_base
from neutron.objects import trunk as trunk_objects
from neutron.services import service_base
from neutron.services.trunk import callbacks
from neutron.services.trunk import constants
from neutron.services.trunk import drivers
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import rules
from neutron.services.trunk.seg_types import validators
# Module-level logger for the trunk service plugin.
LOG = logging.getLogger(__name__)
def _extend_port_trunk_details(core_plugin, port_res, port_db):
"""Add trunk details to a port."""
if port_db.trunk_port:
subports = [{'segmentation_id': x.segmentation_id,
'segmentation_type': x.segmentation_type,
'port_id': x.port_id}
for x in port_db.trunk_port.sub_ports]
trunk_details = {'trunk_id': port_db.trunk_port.id,
'sub_ports': subports}
port_res['trunk_details'] = trunk_details
return port_res
class TrunkPlugin(service_base.ServicePluginBase,
                  common_db_mixin.CommonDbMixin):
    """Service plugin implementing the 'trunk' and 'trunk-details' APIs.

    Provides CRUD for trunks and their subports, dispatches PRECOMMIT/AFTER
    registry events around each DB change, and keeps trunk status in sync
    with the parent port's binding state.
    """
    supported_extension_aliases = ["trunk", "trunk-details"]
    __native_pagination_support = True
    __native_sorting_support = True
    def __init__(self):
        """Register the port-dict extender, drivers and event callbacks."""
        db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
            attributes.PORTS, [_extend_port_trunk_details])
        self._rpc_backend = None
        self._drivers = []
        self._segmentation_types = {}
        self._interfaces = set()
        self._agent_types = set()
        drivers.register()
        registry.subscribe(rules.enforce_port_deletion_rules,
                           resources.PORT, events.BEFORE_DELETE)
        # NOTE(tidwellr) Consider keying off of PRECOMMIT_UPDATE if we find
        # AFTER_UPDATE to be problematic for setting trunk status when a
        # parent port becomes unbound.
        registry.subscribe(self._trigger_trunk_status_change,
                           resources.PORT, events.AFTER_UPDATE)
        # AFTER_INIT lets drivers register themselves via register_driver().
        registry.notify(constants.TRUNK_PLUGIN, events.AFTER_INIT, self)
        for driver in self._drivers:
            LOG.debug('Trunk plugin loaded with driver %s', driver.name)
        self.check_compatibility()
    def check_compatibility(self):
        """Verify the plugin can load correctly and fail otherwise."""
        self.check_driver_compatibility()
        self.check_segmentation_compatibility()
    def check_driver_compatibility(self):
        """Fail to load if no compatible driver is found."""
        if not any([driver.is_loaded for driver in self._drivers]):
            raise trunk_exc.IncompatibleTrunkPluginConfiguration()
    def check_segmentation_compatibility(self):
        """Fail to load if segmentation type conflicts are found.

        In multi-driver deployments each loaded driver must support the same
        set of segmentation types consistently.
        """
        # Get list of segmentation types for the loaded drivers.
        list_of_driver_seg_types = [
            set(driver.segmentation_types) for driver in self._drivers
            if driver.is_loaded
        ]
        # If not empty, check that there is at least one we can use.
        compat_segmentation_types = set()
        if list_of_driver_seg_types:
            compat_segmentation_types = (
                set.intersection(*list_of_driver_seg_types))
            if not compat_segmentation_types:
                raise trunk_exc.IncompatibleDriverSegmentationTypes()
        # If there is at least one, make sure the validator is defined.
        try:
            for seg_type in compat_segmentation_types:
                self.add_segmentation_type(
                    seg_type, validators.get_validator(seg_type))
        except KeyError:
            raise trunk_exc.SegmentationTypeValidatorNotFound(
                seg_type=seg_type)
    def set_rpc_backend(self, backend):
        """Attach the RPC backend used to talk to agents."""
        self._rpc_backend = backend
    def is_rpc_enabled(self):
        """Return True if an RPC backend has been set."""
        return self._rpc_backend is not None
    def register_driver(self, driver):
        """Register driver with trunk plugin."""
        if driver.agent_type:
            self._agent_types.add(driver.agent_type)
        self._interfaces = self._interfaces | set(driver.interfaces)
        self._drivers.append(driver)
    @property
    def registered_drivers(self):
        """The registered drivers."""
        return self._drivers
    @property
    def supported_interfaces(self):
        """A set of supported interfaces."""
        return self._interfaces
    @property
    def supported_agent_types(self):
        """A set of supported agent types."""
        return self._agent_types
    def add_segmentation_type(self, segmentation_type, id_validator):
        """Map a segmentation type to its segmentation-id validator."""
        self._segmentation_types[segmentation_type] = id_validator
        LOG.debug('Added support for segmentation type %s', segmentation_type)
    def validate(self, context, trunk):
        """Return a valid trunk or raises an error if unable to do so."""
        trunk_details = trunk
        trunk_validator = rules.TrunkPortValidator(trunk['port_id'])
        trunk_details['port_id'] = trunk_validator.validate(context)
        subports_validator = rules.SubPortsValidator(
            self._segmentation_types, trunk['sub_ports'], trunk['port_id'])
        trunk_details['sub_ports'] = subports_validator.validate(context)
        return trunk_details
    def get_plugin_description(self):
        """Return a human-readable description of the plugin."""
        return "Trunk port service plugin"
    @classmethod
    def get_plugin_type(cls):
        """Return the service type string this plugin serves."""
        return "trunk"
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_trunk(self, context, trunk_id, fields=None):
        """Return information for the specified trunk."""
        return self._get_trunk(context, trunk_id)
    @db_base_plugin_common.filter_fields
    @db_base_plugin_common.convert_result_to_dict
    def get_trunks(self, context, filters=None, fields=None,
                   sorts=None, limit=None, marker=None, page_reverse=False):
        """Return information for available trunks."""
        filters = filters or {}
        pager = objects_base.Pager(sorts=sorts, limit=limit,
                                   page_reverse=page_reverse, marker=marker)
        return trunk_objects.Trunk.get_objects(context, _pager=pager,
                                               **filters)
    @db_base_plugin_common.convert_result_to_dict
    def create_trunk(self, context, trunk):
        """Create a trunk."""
        trunk = self.validate(context, trunk['trunk'])
        sub_ports = [trunk_objects.SubPort(
            context=context,
            port_id=p['port_id'],
            segmentation_id=p['segmentation_id'],
            segmentation_type=p['segmentation_type'])
            for p in trunk['sub_ports']]
        admin_state_up = trunk.get('admin_state_up', True)
        # NOTE(status_police): a trunk is created in DOWN status. Depending
        # on the nature of the create request, a driver may set the status
        # immediately to ACTIVE if no physical provisioning is required.
        # Otherwise a transition to BUILD (or ERROR) should be expected
        # depending on how the driver reacts. PRECOMMIT failures prevent the
        # trunk from being created altogether.
        trunk_description = trunk.get('description', "")
        trunk_obj = trunk_objects.Trunk(context=context,
                                        admin_state_up=admin_state_up,
                                        id=uuidutils.generate_uuid(),
                                        name=trunk.get('name', ""),
                                        description=trunk_description,
                                        tenant_id=trunk['tenant_id'],
                                        port_id=trunk['port_id'],
                                        status=constants.DOWN_STATUS,
                                        sub_ports=sub_ports)
        with db_api.autonested_transaction(context.session):
            trunk_obj.create()
            payload = callbacks.TrunkPayload(context, trunk_obj.id,
                                             current_trunk=trunk_obj)
            registry.notify(
                constants.TRUNK, events.PRECOMMIT_CREATE, self,
                payload=payload)
        registry.notify(
            constants.TRUNK, events.AFTER_CREATE, self, payload=payload)
        return trunk_obj
    @db_base_plugin_common.convert_result_to_dict
    def update_trunk(self, context, trunk_id, trunk):
        """Update information for the specified trunk."""
        trunk_data = trunk['trunk']
        with db_api.autonested_transaction(context.session):
            trunk_obj = self._get_trunk(context, trunk_id)
            original_trunk = copy.deepcopy(trunk_obj)
            # NOTE(status_police): a trunk status should not change during an
            # update_trunk(), even in face of PRECOMMIT failures. This is
            # because only name and admin_state_up are being affected, and
            # these are DB properties only.
            trunk_obj.update_fields(trunk_data, reset_changes=True)
            trunk_obj.update()
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             original_trunk=original_trunk,
                                             current_trunk=trunk_obj)
            registry.notify(constants.TRUNK, events.PRECOMMIT_UPDATE, self,
                            payload=payload)
        registry.notify(constants.TRUNK, events.AFTER_UPDATE, self,
                        payload=payload)
        return trunk_obj
    def delete_trunk(self, context, trunk_id):
        """Delete the specified trunk."""
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            rules.trunk_can_be_managed(context, trunk)
            trunk_port_validator = rules.TrunkPortValidator(trunk.port_id)
            if not trunk_port_validator.is_bound(context):
                # NOTE(status_police): when a trunk is deleted, the logical
                # object disappears from the datastore, therefore there is no
                # status transition involved. If PRECOMMIT failures occur,
                # the trunk remains in the status where it was.
                trunk.delete()
                payload = callbacks.TrunkPayload(context, trunk_id,
                                                 original_trunk=trunk)
                registry.notify(constants.TRUNK, events.PRECOMMIT_DELETE, self,
                                payload=payload)
            else:
                raise trunk_exc.TrunkInUse(trunk_id=trunk_id)
        registry.notify(constants.TRUNK, events.AFTER_DELETE, self,
                        payload=payload)
    @db_base_plugin_common.convert_result_to_dict
    def add_subports(self, context, trunk_id, subports):
        """Add one or more subports to trunk."""
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            # Check for basic validation since the request body here is not
            # automatically validated by the API layer.
            subports = subports['sub_ports']
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports, trunk['port_id'])
            subports = subports_validator.validate(
                context, basic_validation=True)
            added_subports = []
            rules.trunk_can_be_managed(context, trunk)
            original_trunk = copy.deepcopy(trunk)
            # NOTE(status_police): the trunk status should transition to
            # DOWN (and finally in ACTIVE or ERROR), only if it is not in
            # ERROR status already. A user should attempt to resolve the ERROR
            # condition before adding more subports to the trunk. Should a
            # trunk be in DOWN or BUILD state (e.g. when dealing with
            # multiple concurrent requests), the status is still forced to
            # DOWN and thus can potentially overwrite an interleaving state
            # change to ACTIVE. Eventually the driver should bring the status
            # back to ACTIVE or ERROR.
            if trunk.status == constants.ERROR_STATUS:
                raise trunk_exc.TrunkInErrorState(trunk_id=trunk_id)
            else:
                trunk.update(status=constants.DOWN_STATUS)
            for subport in subports:
                obj = trunk_objects.SubPort(
                    context=context,
                    trunk_id=trunk_id,
                    port_id=subport['port_id'],
                    segmentation_type=subport['segmentation_type'],
                    segmentation_id=subport['segmentation_id'])
                obj.create()
                trunk['sub_ports'].append(obj)
                added_subports.append(obj)
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=added_subports)
            if added_subports:
                registry.notify(constants.SUBPORTS, events.PRECOMMIT_CREATE,
                                self, payload=payload)
        if added_subports:
            registry.notify(
                constants.SUBPORTS, events.AFTER_CREATE, self, payload=payload)
        return trunk
    @db_base_plugin_common.convert_result_to_dict
    def remove_subports(self, context, trunk_id, subports):
        """Remove one or more subports from trunk."""
        subports = subports['sub_ports']
        with db_api.autonested_transaction(context.session):
            trunk = self._get_trunk(context, trunk_id)
            original_trunk = copy.deepcopy(trunk)
            rules.trunk_can_be_managed(context, trunk)
            subports_validator = rules.SubPortsValidator(
                self._segmentation_types, subports)
            # the subports are being removed, therefore we do not need to
            # enforce any specific trunk rules, other than basic validation
            # of the request body.
            subports = subports_validator.validate(
                context, basic_validation=True,
                trunk_validation=False)
            current_subports = {p.port_id: p for p in trunk.sub_ports}
            removed_subports = []
            for subport in subports:
                subport_obj = current_subports.pop(subport['port_id'], None)
                if not subport_obj:
                    raise trunk_exc.SubPortNotFound(trunk_id=trunk_id,
                                                    port_id=subport['port_id'])
                subport_obj.delete()
                removed_subports.append(subport_obj)
            # Rebuild the in-memory subport list from the survivors.
            del trunk.sub_ports[:]
            trunk.sub_ports.extend(current_subports.values())
            # NOTE(status_police): the trunk status should transition to
            # DOWN irrespective of the status in which it is in to allow
            # the user to resolve potential conflicts due to prior add_subports
            # operations.
            # Should a trunk be in DOWN or BUILD state (e.g. when dealing
            # with multiple concurrent requests), the status is still forced
            # to DOWN. See add_subports() for more details.
            trunk.update(status=constants.DOWN_STATUS)
            payload = callbacks.TrunkPayload(context, trunk_id,
                                             current_trunk=trunk,
                                             original_trunk=original_trunk,
                                             subports=removed_subports)
            if removed_subports:
                registry.notify(constants.SUBPORTS, events.PRECOMMIT_DELETE,
                                self, payload=payload)
        if removed_subports:
            registry.notify(
                constants.SUBPORTS, events.AFTER_DELETE, self, payload=payload)
        return trunk
    @db_base_plugin_common.filter_fields
    def get_subports(self, context, trunk_id, fields=None):
        """Return subports for the specified trunk."""
        trunk = self.get_trunk(context, trunk_id)
        return {'sub_ports': trunk['sub_ports']}
    def _get_trunk(self, context, trunk_id):
        """Return the trunk object or raise if not found."""
        obj = trunk_objects.Trunk.get_object(context, id=trunk_id)
        if obj is None:
            raise trunk_exc.TrunkNotFound(trunk_id=trunk_id)
        return obj
    def _trigger_trunk_status_change(self, resource, event, trigger, **kwargs):
        """AFTER_UPDATE port callback: set trunk DOWN when parent unbinds."""
        updated_port = kwargs['port']
        trunk_details = updated_port.get('trunk_details')
        # If no trunk_details, the port is not the parent of a trunk.
        if not trunk_details:
            return
        context = kwargs['context']
        original_port = kwargs['original_port']
        orig_vif_type = original_port.get(portbindings.VIF_TYPE)
        new_vif_type = updated_port.get(portbindings.VIF_TYPE)
        vif_type_changed = orig_vif_type != new_vif_type
        if vif_type_changed and new_vif_type == portbindings.VIF_TYPE_UNBOUND:
            trunk = self._get_trunk(context, trunk_details['trunk_id'])
            # NOTE(status_police) Trunk status goes to DOWN when the parent
            # port is unbound. This means there are no more physical resources
            # associated with the logical resource.
            trunk.update(status=constants.DOWN_STATUS)
| {
"repo_name": "igor-toga/local-snat",
"path": "neutron/services/trunk/plugin.py",
"copies": "1",
"size": "18820",
"license": "apache-2.0",
"hash": 4622704024288901000,
"line_mean": 45.0146699267,
"line_max": 79,
"alpha_frac": 0.6023379384,
"autogenerated": false,
"ratio": 4.482019528459157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 409
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from rally.plugins.openstack.context.ceilometer import samples
from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils
from tests.unit import test
# Module path used as the mock.patch target prefix in the tests below.
CTX = "rally.plugins.openstack.context.ceilometer"
class CeilometerSampleGeneratorTestCase(test.TestCase):
    """Tests for the Ceilometer sample generator context plugin."""
    def _gen_tenants(self, count):
        """Build a fake tenants mapping with ``count`` entries."""
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = {"name": str(id_)}
        return tenants
    def _gen_context(self, tenants_count, users_per_tenant,
                     resources_per_tenant, samples_per_resource):
        """Build a complete fake task context and its tenants mapping."""
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants.keys():
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": mock.MagicMock()})
        context = test.get_test_context()
        context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "ceilometer": {
                    "counter_name": "fake-counter-name",
                    "counter_type": "fake-counter-type",
                    "counter_unit": "fake-counter-unit",
                    "counter_volume": 100,
                    "resources_per_tenant": resources_per_tenant,
                    "samples_per_resource": samples_per_resource,
                    "timestamp_interval": 60,
                    "metadata_list": [
                        {"status": "active", "name": "fake_resource",
                         "deleted": "False",
                         "created_at": "2015-09-04T12:34:19.000000"},
                        {"status": "not_active", "name": "fake_resource_1",
                         "deleted": "False",
                         "created_at": "2015-09-10T06:55:12.000000"}
                    ]
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        return tenants, context
    def test_init(self):
        """The generator stores the 'ceilometer' section as its config."""
        context = {}
        context["task"] = mock.MagicMock()
        context["config"] = {
            "ceilometer": {
                "counter_name": "cpu_util",
                "counter_type": "gauge",
                "counter_unit": "instance",
                "counter_volume": 1.0,
                "resources_per_tenant": 5,
                "samples_per_resource": 5,
                "timestamp_intervals": 60,
                "metadata_list": [
                    {"status": "active", "name": "fake_resource",
                     "deleted": "False",
                     "created_at": "2015-09-04T12:34:19.000000"},
                    {"status": "not_active", "name": "fake_resource_1",
                     "deleted": "False",
                     "created_at": "2015-09-10T06:55:12.000000"}
                ]
            }
        }
        inst = samples.CeilometerSampleGenerator(context)
        self.assertEqual(inst.config, context["config"]["ceilometer"])
    def test_setup(self):
        """setup() populates per-tenant samples and resources."""
        tenants_count = 2
        users_per_tenant = 2
        resources_per_tenant = 2
        samples_per_resource = 2
        tenants, real_context = self._gen_context(
            tenants_count, users_per_tenant,
            resources_per_tenant, samples_per_resource)
        scenario = ceilo_utils.CeilometerScenario(real_context)
        sample = {
            "counter_name": "fake-counter-name",
            "counter_type": "fake-counter-type",
            "counter_unit": "fake-counter-unit",
            "counter_volume": 100,
            "resource_id": "fake-resource-id",
            "metadata_list": [
                {"status": "active", "name": "fake_resource",
                 "deleted": "False",
                 "created_at": "2015-09-04T12:34:19.000000"},
                {"status": "not_active", "name": "fake_resource_1",
                 "deleted": "False",
                 "created_at": "2015-09-10T06:55:12.000000"}
            ]
        }
        scenario.generate_random_name = mock.Mock(
            return_value="fake_resource-id")
        kwargs = copy.deepcopy(sample)
        kwargs.pop("resource_id")
        samples_to_create = scenario._make_samples(count=samples_per_resource,
                                                   interval=60, **kwargs)
        # Build the context we expect setup() to produce.
        new_context = copy.deepcopy(real_context)
        for id_ in tenants.keys():
            new_context["tenants"][id_].setdefault("samples", [])
            new_context["tenants"][id_].setdefault("resources", [])
            for i in six.moves.xrange(resources_per_tenant):
                for sample in samples_to_create:
                    new_context["tenants"][id_]["samples"].append(sample)
                new_context["tenants"][id_]["resources"].append(
                    sample["resource_id"])
        with mock.patch("%s.samples.ceilo_utils.CeilometerScenario"
                        "._create_samples" % CTX) as mock_create_samples:
            mock_create_samples.return_value = []
            for i, sample in enumerate(samples_to_create):
                sample_object = mock.MagicMock(resource_id="fake_resource-id")
                sample_object.to_dict.return_value = sample
                mock_create_samples.return_value.append(sample_object)
            ceilometer_ctx = samples.CeilometerSampleGenerator(real_context)
            ceilometer_ctx.setup()
            self.assertEqual(new_context, ceilometer_ctx.context)
    def test_cleanup(self):
        """cleanup() is a no-op and must not raise."""
        tenants, context = self._gen_context(2, 5, 3, 3)
        ceilometer_ctx = samples.CeilometerSampleGenerator(context)
        ceilometer_ctx.cleanup()
| {
"repo_name": "group-policy/rally",
"path": "tests/unit/plugins/openstack/context/ceilometer/test_samples.py",
"copies": "1",
"size": "6456",
"license": "apache-2.0",
"hash": 2631951093707958000,
"line_mean": 40.1210191083,
"line_max": 78,
"alpha_frac": 0.5309789343,
"autogenerated": false,
"ratio": 4.20586319218241,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 157
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from rally.plugins.openstack.scenarios.ceilometer import utils
from tests.unit import test
# Module path of the code under test, usable as a mock.patch target prefix.
CEILOMETER_UTILS = "rally.plugins.openstack.scenarios.ceilometer.utils"
class CeilometerScenarioTestCase(test.ScenarioTestCase):
    def setUp(self):
        """Create a CeilometerScenario wired to the test context."""
        super(CeilometerScenarioTestCase, self).setUp()
        self.scenario = utils.CeilometerScenario(self.context)
def test__make_samples(self):
self.scenario.generate_random_name = mock.Mock(
return_value="fake_resource")
result = self.scenario._make_samples(project_id="fake_project_id")
expected = [{"counter_name": "cpu_util",
"counter_type": "gauge",
"counter_unit": "%",
"counter_volume": 1,
"resource_id": "fake_resource",
"project_id": "fake_project_id",
}]
self.assertEqual(expected, result)
    def test__list_alarms_by_id(self):
        """Listing with an alarm id delegates to alarms.get()."""
        self.assertEqual(self.clients("ceilometer").alarms.get.return_value,
                         self.scenario._list_alarms("alarm-id"))
        self.clients("ceilometer").alarms.get.assert_called_once_with(
            "alarm-id")
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.list_alarms")
    def test__list_alarms(self):
        """Listing without an alarm id delegates to alarms.list()."""
        self.assertEqual(self.clients("ceilometer").alarms.list.return_value,
                         self.scenario._list_alarms())
        self.clients("ceilometer").alarms.list.assert_called_once_with()
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.list_alarms")
    def test__create_alarm(self):
        """_create_alarm() forwards kwargs plus a generated name."""
        alarm_dict = {"alarm_id": "fake-alarm-id"}
        orig_alarm_dict = copy.copy(alarm_dict)
        self.scenario.generate_random_name = mock.Mock()
        self.assertEqual(self.scenario._create_alarm("fake-meter-name", 100,
                                                     alarm_dict),
                         self.clients("ceilometer").alarms.create.return_value)
        self.clients("ceilometer").alarms.create.assert_called_once_with(
            meter_name="fake-meter-name",
            threshold=100,
            description="Test Alarm",
            alarm_id="fake-alarm-id",
            name=self.scenario.generate_random_name.return_value)
        # ensure that _create_alarm() doesn't modify the alarm dict as
        # a side-effect
        self.assertDictEqual(alarm_dict, orig_alarm_dict)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.create_alarm")
    def test__delete_alarms(self):
        """_delete_alarm() delegates to alarms.delete()."""
        self.scenario._delete_alarm("alarm-id")
        self.clients("ceilometer").alarms.delete.assert_called_once_with(
            "alarm-id")
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.delete_alarm")
    def test__update_alarm(self):
        """_update_alarm() forwards the diff as keyword arguments."""
        alarm_diff = {"description": "Changed Test Description"}
        orig_alarm_diff = copy.copy(alarm_diff)
        self.scenario._update_alarm("alarm-id", alarm_diff)
        self.clients("ceilometer").alarms.update.assert_called_once_with(
            "alarm-id", **alarm_diff)
        # ensure that _update_alarm() doesn't modify the alarm dict as
        # a side-effect
        self.assertDictEqual(alarm_diff, orig_alarm_diff)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.update_alarm")
    def test__get_alarm_history(self):
        """_get_alarm_history() delegates to alarms.get_history()."""
        self.assertEqual(
            self.scenario._get_alarm_history("alarm-id"),
            self.clients("ceilometer").alarms.get_history.return_value)
        self.clients("ceilometer").alarms.get_history.assert_called_once_with(
            "alarm-id")
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "ceilometer.get_alarm_history")
def test__get_alarm_state(self):
self.assertEqual(
self.scenario._get_alarm_state("alarm-id"),
self.clients("ceilometer").alarms.get_state.return_value)
self.clients("ceilometer").alarms.get_state.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_state")
def test__set_alarm_state(self):
alarm = mock.Mock()
self.clients("ceilometer").alarms.create.return_value = alarm
return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100)
self.mock_wait_for.mock.assert_called_once_with(
alarm,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=100, check_interval=1)
self.mock_resource_is.mock.assert_called_once_with("ok")
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_alarm)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.set_alarm_state")
def test__list_events(self):
self.assertEqual(
self.scenario._list_events(),
self.admin_clients("ceilometer").events.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_events")
def test__get_events(self):
self.assertEqual(
self.scenario._get_event(event_id="fake_id"),
self.admin_clients("ceilometer").events.get.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_event")
def test__list_event_types(self):
self.assertEqual(
self.scenario._list_event_types(),
self.admin_clients("ceilometer").event_types.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_types")
def test__list_event_traits(self):
self.assertEqual(
self.scenario._list_event_traits(
event_type="fake_event_type", trait_name="fake_trait_name"),
self.admin_clients("ceilometer").traits.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_traits")
def test__list_event_trait_descriptions(self):
self.assertEqual(
self.scenario._list_event_trait_descriptions(
event_type="fake_event_type"
),
self.admin_clients("ceilometer").trait_descriptions.list.
return_value
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(),
"ceilometer.list_event_trait_descriptions")
def test__list_meters(self):
self.assertEqual(self.scenario._list_meters(),
self.clients("ceilometer").meters.list.return_value)
self.clients("ceilometer").meters.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_meters")
def test__list_resources(self):
self.assertEqual(
self.scenario._list_resources(),
self.clients("ceilometer").resources.list.return_value)
self.clients("ceilometer").resources.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_resources")
def test__list_samples(self):
self.assertEqual(
self.scenario._list_samples(),
self.clients("ceilometer").samples.list.return_value)
self.clients("ceilometer").samples.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_samples")
def test__get_resource(self):
self.assertEqual(self.scenario._get_resource("fake-resource-id"),
self.clients("ceilometer").resources.get.return_value)
self.clients("ceilometer").resources.get.assert_called_once_with(
"fake-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_resource")
def test__get_stats(self):
self.assertEqual(
self.scenario._get_stats("fake-meter"),
self.clients("ceilometer").statistics.list.return_value)
self.clients("ceilometer").statistics.list.assert_called_once_with(
"fake-meter")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_stats")
def test__create_meter(self):
self.scenario.generate_random_name = mock.Mock()
self.assertEqual(
self.scenario._create_meter(fakearg="fakearg"),
self.clients("ceilometer").samples.create.return_value[0])
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name=self.scenario.generate_random_name.return_value,
fakearg="fakearg")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_meter")
def test__query_alarms(self):
self.assertEqual(
self.scenario._query_alarms("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarms.query.return_value)
self.clients("ceilometer").query_alarms.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarms")
def test__query_alarm_history(self):
self.assertEqual(
self.scenario._query_alarm_history(
"fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarm_history.query.return_value)
self.clients(
"ceilometer").query_alarm_history.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarm_history")
def test__query_samples(self):
self.assertEqual(
self.scenario._query_samples("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_samples.query.return_value)
self.clients("ceilometer").query_samples.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_samples")
def test__create_sample_no_resource_id(self):
self.scenario.generate_random_name = mock.Mock()
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id=self.scenario.generate_random_name.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
def test__create_sample(self):
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume",
"test-resource-id")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id="test-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
| {
"repo_name": "paboldin/rally",
"path": "tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py",
"copies": "1",
"size": "13812",
"license": "apache-2.0",
"hash": -336743745842342460,
"line_mean": 46.9583333333,
"line_max": 79,
"alpha_frac": 0.5836229366,
"autogenerated": false,
"ratio": 4.193078324225866,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5276701260825866,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.scenarios.monasca import metrics
from tests.unit import test
@ddt.ddt
class MonascaMetricsTestCase(test.ScenarioTestCase):
    """Unit tests for the monasca ListMetrics scenario plugin."""

    @ddt.data(
        {"region": None},
        {"region": "fake_region"},
    )
    @ddt.unpack
    def test_list_metrics(self, region=None):
        """run() must forward the region argument to _list_metrics()."""
        self.region = region
        list_metrics_scenario = metrics.ListMetrics(self.context)
        list_metrics_scenario._list_metrics = mock.MagicMock()
        list_metrics_scenario.run(region=self.region)
        list_metrics_scenario._list_metrics.assert_called_once_with(
            region=self.region)
| {
"repo_name": "vganapath/rally",
"path": "tests/unit/plugins/openstack/scenarios/monasca/test_metrics.py",
"copies": "2",
"size": "1137",
"license": "apache-2.0",
"hash": -2549956793733399600,
"line_mean": 31.4857142857,
"line_max": 75,
"alpha_frac": 0.7238346526,
"autogenerated": false,
"ratio": 3.9072164948453607,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5631051147445361,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from rally.plugins.openstack.services.image import glance_v1
from rally.plugins.openstack.services.image import glance_v2
from rally.plugins.openstack.services.image import image
from tests.unit import test
@ddt.ddt
class ImageTestCase(test.TestCase):
    """Tests for the unified Image service facade.

    Every test patches ``Image.discover_impl`` so that ``service._impl``
    is a plain MagicMock; the tests then verify that each facade method
    forwards its arguments to the implementation unchanged.
    """
    def setUp(self):
        super(ImageTestCase, self).setUp()
        # Fake OpenStack clients object handed to the service under test.
        self.clients = mock.MagicMock()
    def get_service_with_fake_impl(self):
        """Build an Image service whose backend impl is a MagicMock."""
        path = "rally.plugins.openstack.services.image.image"
        with mock.patch("%s.Image.discover_impl" % path) as mock_discover:
            # discover_impl returns (impl_cls, version); version is unused.
            mock_discover.return_value = mock.MagicMock(), None
            service = image.Image(self.clients)
        return service
    @ddt.data(("image_name", "container_format", "image_location",
               "disk_format", "visibility", "min_disk", "min_ram"))
    def test_create_image(self, params):
        """create_image() must forward every keyword to the impl."""
        (image_name, container_format, image_location, disk_format,
         visibility, min_disk, min_ram) = params
        service = self.get_service_with_fake_impl()
        properties = {"fakeprop": "fake"}
        service.create_image(image_name=image_name,
                             container_format=container_format,
                             image_location=image_location,
                             disk_format=disk_format,
                             visibility=visibility,
                             min_disk=min_disk,
                             min_ram=min_ram,
                             properties=properties)
        service._impl.create_image.assert_called_once_with(
            image_name=image_name, container_format=container_format,
            image_location=image_location, disk_format=disk_format,
            visibility=visibility, min_disk=min_disk, min_ram=min_ram,
            properties=properties)
    @ddt.data(("image_id", "image_name", "min_disk", "min_ram",
               "remove_props"))
    def test_update_image(self, params):
        """update_image() must forward the id and keywords to the impl."""
        (image_id, image_name, min_disk, min_ram, remove_props) = params
        service = self.get_service_with_fake_impl()
        service.update_image(image_id,
                             image_name=image_name,
                             min_disk=min_disk,
                             min_ram=min_ram,
                             remove_props=remove_props)
        service._impl.update_image.assert_called_once_with(
            image_id, image_name=image_name, min_disk=min_disk,
            min_ram=min_ram, remove_props=remove_props)
    @ddt.data("image_id")
    def test_get_image(self, param):
        """get_image() must pass the image id positionally to the impl."""
        image_id = param
        service = self.get_service_with_fake_impl()
        service.get_image(image=image_id)
        service._impl.get_image.assert_called_once_with(image_id)
    @ddt.data(("status", "visibility", "owner"))
    def test_list_images(self, params):
        """list_images() must forward its filter keywords to the impl."""
        status, visibility, owner = params
        service = self.get_service_with_fake_impl()
        service.list_images(status=status, visibility=visibility, owner=owner)
        service._impl.list_images.assert_called_once_with(
            status=status, visibility=visibility, owner=owner)
    @ddt.data(("image_id", "visibility"))
    def test_set_visibility(self, params):
        """set_visibility() must pass the id positionally to the impl."""
        image_id, visibility = params
        service = self.get_service_with_fake_impl()
        service.set_visibility(image_id=image_id, visibility=visibility)
        service._impl.set_visibility.assert_called_once_with(
            image_id, visibility=visibility)
    def test_delete_image(self):
        """delete_image() must pass the id positionally to the impl."""
        image_id = "image_id"
        service = self.get_service_with_fake_impl()
        service.delete_image(image_id=image_id)
        service._impl.delete_image.assert_called_once_with(image_id)
    def test_is_applicable(self):
        """Each unified service must accept its matching glance version."""
        clients = mock.Mock()
        clients.glance().version = "1.0"
        self.assertTrue(
            glance_v1.UnifiedGlanceV1Service.is_applicable(clients))
        clients.glance().version = "2.0"
        self.assertTrue(
            glance_v2.UnifiedGlanceV2Service.is_applicable(clients))
| {
"repo_name": "yeming233/rally",
"path": "tests/unit/plugins/openstack/services/image/test_image.py",
"copies": "1",
"size": "4663",
"license": "apache-2.0",
"hash": -2592085340009756000,
"line_mean": 40.2654867257,
"line_max": 78,
"alpha_frac": 0.624276217,
"autogenerated": false,
"ratio": 3.8923205342237064,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 113
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from rally.plugins.openstack.scenarios.monasca import utils
from tests.unit import test
@ddt.ddt
class MonascaScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the monasca scenario utility methods."""

    def setUp(self):
        super(MonascaScenarioTestCase, self).setUp()
        self.scenario = utils.MonascaScenario(self.context)
        # Dimension payload reused by the metric-creation test.
        self.kwargs = {
            "dimensions": {
                "region": "fake_region",
                "hostname": "fake_host_name",
                "service": "fake_service",
                "url": "fake_url"
            }
        }
    def test_list_metrics(self):
        """_list_metrics() must return the client result and be timed."""
        metrics_api = self.clients("monasca").metrics
        listed = self.scenario._list_metrics()
        self.assertEqual(metrics_api.list.return_value, listed)
        self._test_atomic_action_timer(self.scenario.atomic_actions(),
                                       "monasca.list_metrics")
    @ddt.data(
        {"name": ""},
        {"name": "fake_metric"},
    )
    @ddt.unpack
    def test_create_metrics(self, name=None):
        """_create_metrics() must issue exactly one create call."""
        self.name = name
        self.scenario._create_metrics(name=self.name, kwargs=self.kwargs)
        self.assertEqual(1, self.clients("monasca").metrics.create.call_count)
| {
"repo_name": "gluke77/rally",
"path": "tests/unit/plugins/openstack/scenarios/monasca/test_utils.py",
"copies": "4",
"size": "1773",
"license": "apache-2.0",
"hash": -2623253264387317000,
"line_mean": 33.7647058824,
"line_max": 78,
"alpha_frac": 0.6407219402,
"autogenerated": false,
"ratio": 3.94,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6580721940200001,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import logging
__all__ = ['get_logger']
# Default log line format: timestamp with milliseconds, pid, level,
# logger name, then the message.
DEFAULT_LOG_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                      '%(name)s [-] %(message)s')
# Module-level singleton, populated lazily by get_logger().
LOGGER = None
class Logger(logging.Logger):
    """Logger named 'PixieBot' writing either to a file or to stderr.

    :param debug: when True the level is DEBUG, otherwise INFO.
    :param logfile: path of the log file; ``None`` selects a
        ``StreamHandler`` (stderr) instead of a ``FileHandler``.

    A permission error (EACCES) while opening the log file is tolerated:
    the logger is then left without a handler so the bot keeps running.
    """
    def __init__(self, debug=False, logfile=None):
        logging.Logger.__init__(self, 'PixieBot')
        try:
            if logfile is not None:
                self.handler = logging.FileHandler(logfile)
            else:
                self.handler = logging.StreamHandler()
            formatter = logging.Formatter(DEFAULT_LOG_FORMAT)
            self.handler.setFormatter(formatter)
            self.addHandler(self.handler)
            if debug:
                self.setLevel(logging.DEBUG)
            else:
                self.setLevel(logging.INFO)
        except IOError as e:
            # Bug fix: the original swallowed *every* IOError — the
            # "if errno == EACCES: pass" branch fell through silently
            # for all other errors too (missing directory, disk error,
            # ...). Only EACCES is meant to be tolerated; re-raise the
            # rest so real failures are not hidden.
            if e.errno != errno.EACCES:
                raise
def get_logger():
    """Return the process-wide Logger singleton, creating it lazily.

    The first call instantiates a ``Logger`` with ``debug=True`` (no
    logfile, so it writes to stderr); subsequent calls return the same
    instance.
    """
    global LOGGER
    if LOGGER is None:
        LOGGER = Logger(debug=True)
    return LOGGER
| {
"repo_name": "umago/pixiebot",
"path": "pixiebot/log.py",
"copies": "1",
"size": "1600",
"license": "apache-2.0",
"hash": -7733280411306027000,
"line_mean": 28.0909090909,
"line_max": 78,
"alpha_frac": 0.615625,
"autogenerated": false,
"ratio": 4.18848167539267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.530410667539267,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally.benchmark.context.sahara import sahara_cluster
from rally import exceptions
from tests.unit import test
CONF = cfg.CONF
# Shorthand module paths used to build mock.patch target strings.
BASE_CTX = "rally.benchmark.context"
CTX = "rally.benchmark.context.sahara"
SCN = "rally.benchmark.scenarios"
class SaharaClusterTestCase(test.TestCase):
    """Tests for the SaharaCluster benchmark context setup/cleanup."""
    def setUp(self):
        super(SaharaClusterTestCase, self).setUp()
        # Two tenants with two users each; every tenant already carries a
        # "sahara_image" key, as the SaharaCluster context expects.
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()
        self.tenants = {}
        self.users_key = []
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara_image": "42"}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "endpoint": "endpoint"})
        # Poll instantly so the active/error status checks don't sleep.
        CONF.set_override("cluster_check_interval", 0, "benchmark")
    @property
    def context_without_cluster_keys(self):
        """A minimal benchmark context dict for the sahara_cluster plugin."""
        return {
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_cluster": {
                    "flavor_id": "test_flavor",
                    "workers_count": 2,
                    "plugin_name": "test_plugin",
                    "hadoop_version": "test_version"
                }
            },
            "admin": {"endpoint": mock.MagicMock()},
            "task": mock.MagicMock(),
            "users": self.users_key,
            "tenants": self.tenants
        }
    @mock.patch("%s.sahara_cluster.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.sahara_cluster.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients,
                               mock_launch, mock_cleanup):
        """setup() must launch one cluster per tenant; cleanup() frees them."""
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        ctx = self.context_without_cluster_keys
        sahara_ctx = sahara_cluster.SaharaCluster(ctx)
        # Expected _launch_cluster call per tenant, with plugin defaults.
        launch_cluster_calls = []
        for i in self.tenants:
            launch_cluster_calls.append(mock.call(
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                workers_count=2,
                image_id=ctx["tenants"][i]["sahara_image"],
                floating_ip_pool=None,
                volumes_per_node=None,
                volumes_size=1,
                auto_security_group=True,
                security_groups=None,
                node_configs=None,
                cluster_configs=None,
                enable_anti_affinity=False,
                wait_active=False
            ))
        # First poll says "not-active", second says "active" so setup()
        # returns after two status checks.
        mock_sahara.clusters.get.side_effect = [
            mock.MagicMock(status="not-active"),
            mock.MagicMock(status="active")]
        sahara_ctx.setup()
        mock_launch.assert_has_calls(launch_cluster_calls)
        sahara_ctx.cleanup()
        mock_cleanup.assert_called_once_with(names=["sahara.clusters"],
                                             users=ctx["users"])
    @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.sahara_cluster.osclients" % CTX)
    def test_setup_and_cleanup_error(self, mock_osclients, mock_launch):
        """setup() must raise SaharaClusterFailure on an 'error' status."""
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        ctx = self.context_without_cluster_keys
        sahara_ctx = sahara_cluster.SaharaCluster(ctx)
        # NOTE(review): this expected-call list is built but never
        # asserted below, and it diverges from the one above
        # ("auto_security_groups" vs "auto_security_group", no
        # "enable_anti_affinity") — looks like dead code; confirm and
        # either assert it or drop it.
        launch_cluster_calls = []
        for i in self.tenants:
            launch_cluster_calls.append(mock.call(
                plugin_name="test_plugin",
                hadoop_version="test_version",
                flavor_id="test_flavor",
                workers_count=2,
                image_id=ctx["tenants"][i]["sahara_image"],
                floating_ip_pool=None,
                volumes_per_node=None,
                volumes_size=1,
                auto_security_groups=True,
                security_groups=None,
                node_configs=None,
                cluster_configs=None,
                wait_active=False
            ))
        # Second status poll reports "error", which must abort setup().
        mock_sahara.clusters.get.side_effect = [
            mock.MagicMock(status="not-active"),
            mock.MagicMock(status="error")]
        self.assertRaises(exceptions.SaharaClusterFailure, sahara_ctx.setup)
| {
"repo_name": "pandeyop/rally",
"path": "tests/unit/benchmark/context/sahara/test_sahara_cluster.py",
"copies": "2",
"size": "5312",
"license": "apache-2.0",
"hash": 3806425754344365600,
"line_mean": 35.1360544218,
"line_max": 79,
"alpha_frac": 0.5606174699,
"autogenerated": false,
"ratio": 4.009056603773585,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5569674073673585,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally.plugins.openstack.services.storage import cinder_v1
from tests.unit import fakes
from tests.unit import test
# Module path prefix used to build mock.patch target strings.
BASE_PATH = "rally.plugins.openstack.services.storage"
CONF = cfg.CONF
class CinderV1ServiceTestCase(test.ScenarioTestCase):
    """Tests for the native cinder v1 service wrapper.

    ``self.cinder`` is the mocked python-cinderclient instance, so each
    test checks the exact client call the service method issues plus the
    atomic-action timer it records.
    """
    def setUp(self):
        super(CinderV1ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        # The client object every service method ultimately talks to.
        self.cinder = self.clients.cinder.return_value
        self.name_generator = mock.MagicMock()
        self.service = cinder_v1.CinderV1Service(
            self.clients, name_generator=self.name_generator)
    def atomic_actions(self):
        """Shortcut to the atomic actions recorded by the service."""
        return self.service._atomic_actions
    def test_create_volume(self):
        """create_volume() must create, then wait for 'available'."""
        self.service.generate_random_name = mock.MagicMock(
            return_value="volume")
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        return_volume = self.service.create_volume(1)
        # v1 defaults: a generated display_name, everything else None.
        kwargs = {"display_name": "volume",
                  "display_description": None,
                  "snapshot_id": None,
                  "source_volid": None,
                  "volume_type": None,
                  "user_id": None,
                  "project_id": None,
                  "availability_zone": None,
                  "metadata": None,
                  "imageRef": None}
        self.cinder.volumes.create.assert_called_once_with(1, **kwargs)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.volumes.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_volume)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_volume")
    @mock.patch("%s.cinder_v1.random" % BASE_PATH)
    def test_create_volume_with_size_range(self, mock_random):
        """A dict size must be turned into a random int within the range."""
        mock_random.randint.return_value = 3
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        return_volume = self.service.create_volume(
            size={"min": 1, "max": 5}, display_name="volume")
        kwargs = {"display_name": "volume",
                  "display_description": None,
                  "snapshot_id": None,
                  "source_volid": None,
                  "volume_type": None,
                  "user_id": None,
                  "project_id": None,
                  "availability_zone": None,
                  "metadata": None,
                  "imageRef": None}
        # 3 comes from the patched random.randint above.
        self.cinder.volumes.create.assert_called_once_with(
            3, **kwargs)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.volumes.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_volume)
    def test_update_volume(self):
        """update_volume() must unwrap the {'volume': ...} response."""
        return_value = {"volume": fakes.FakeVolume()}
        self.cinder.volumes.update.return_value = return_value
        self.assertEqual(return_value["volume"],
                         self.service.update_volume(1))
        self.cinder.volumes.update.assert_called_once_with(1)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.update_volume")
    def test_update_volume_with_name_description(self):
        """update_volume() must forward display_name/display_description."""
        return_value = {"volume": fakes.FakeVolume()}
        self.cinder.volumes.update.return_value = return_value
        return_volume = self.service.update_volume(
            1, display_name="volume", display_description="fake")
        self.cinder.volumes.update.assert_called_once_with(
            1, display_name="volume", display_description="fake")
        self.assertEqual(return_value["volume"], return_volume)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.update_volume")
    def test_list_types(self):
        """list_types() must pass search_opts positionally to the client."""
        self.assertEqual(self.cinder.volume_types.list.return_value,
                         self.service.list_types(search_opts=None))
        self.cinder.volume_types.list.assert_called_once_with(None)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.list_types")
    def test_create_snapshot(self):
        """create_snapshot() must use a generated name and wait for it."""
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        self.service.generate_random_name = mock.MagicMock(
            return_value="snapshot")
        return_snapshot = self.service.create_snapshot(1)
        self.cinder.volume_snapshots.create.assert_called_once_with(
            1, display_name="snapshot", display_description=None,
            force=False)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.volume_snapshots.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_snapshot)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_snapshot")
    def test_create_snapshot_with_name(self):
        """An explicit display_name must be used instead of a random one."""
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        return_snapshot = self.service.create_snapshot(
            1, display_name="snapshot")
        self.cinder.volume_snapshots.create.assert_called_once_with(
            1, display_name="snapshot", display_description=None,
            force=False)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.volume_snapshots.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_snapshot)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_snapshot")
    def test_create_backup(self):
        """create_backup() must use a generated name and wait for it."""
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        self.service.generate_random_name = mock.MagicMock(
            return_value="backup")
        return_backup = self.service.create_backup(1)
        self.cinder.backups.create.assert_called_once_with(
            1, name="backup", description=None, container=None)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.backups.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_backup)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_backup")
    def test_create_backup_with_name(self):
        """An explicit name must be used instead of a random one."""
        self.service._wait_available_volume = mock.MagicMock()
        self.service._wait_available_volume.return_value = fakes.FakeVolume()
        return_backup = self.service.create_backup(1, name="backup")
        self.cinder.backups.create.assert_called_once_with(
            1, name="backup", description=None, container=None)
        self.service._wait_available_volume.assert_called_once_with(
            self.cinder.backups.create.return_value)
        self.assertEqual(self.service._wait_available_volume.return_value,
                         return_backup)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_backup")
    def test_create_volume_type(self):
        """create_volume_type() must fall back to a generated name."""
        self.service.generate_random_name = mock.MagicMock(
            return_value="volume_type")
        return_type = self.service.create_volume_type(name=None)
        self.cinder.volume_types.create.assert_called_once_with(
            name="volume_type")
        self.assertEqual(self.cinder.volume_types.create.return_value,
                         return_type)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_volume_type")
    def test_create_volume_type_with_name(self):
        """An explicit type name must be passed through unchanged."""
        return_type = self.service.create_volume_type(name="volume_type")
        self.cinder.volume_types.create.assert_called_once_with(
            name="volume_type")
        self.assertEqual(self.cinder.volume_types.create.return_value,
                         return_type)
        self._test_atomic_action_timer(self.atomic_actions(),
                                       "cinder_v1.create_volume_type")
class UnifiedCinderV1ServiceTestCase(test.TestCase):
def setUp(self):
super(UnifiedCinderV1ServiceTestCase, self).setUp()
self.clients = mock.MagicMock()
self.service = cinder_v1.UnifiedCinderV1Service(self.clients)
self.service._impl = mock.MagicMock()
def test__unify_volume(self):
class SomeVolume(object):
id = 1
display_name = "volume"
size = 1
status = "st"
volume = self.service._unify_volume(SomeVolume())
self.assertEqual(1, volume.id)
self.assertEqual("volume", volume.name)
self.assertEqual(1, volume.size)
self.assertEqual("st", volume.status)
def test__unify_volume_with_dict(self):
some_volume = {"display_name": "volume", "id": 1,
"size": 1, "status": "st"}
volume = self.service._unify_volume(some_volume)
self.assertEqual(1, volume.id)
self.assertEqual("volume", volume.name)
self.assertEqual(1, volume.size)
self.assertEqual("st", volume.status)
def test__unify_snapshot(self):
class SomeSnapshot(object):
id = 1
display_name = "snapshot"
volume_id = "volume"
status = "st"
snapshot = self.service._unify_snapshot(SomeSnapshot())
self.assertEqual(1, snapshot.id)
self.assertEqual("snapshot", snapshot.name)
self.assertEqual("volume", snapshot.volume_id)
self.assertEqual("st", snapshot.status)
def test_create_volume(self):
self.service._unify_volume = mock.MagicMock()
self.assertEqual(self.service._unify_volume.return_value,
self.service.create_volume(1))
self.service._impl.create_volume.assert_called_once_with(
1, availability_zone=None, display_description=None,
display_name=None, imageRef=None, metadata=None,
project_id=None, snapshot_id=None, source_volid=None,
user_id=None, volume_type=None)
self.service._unify_volume.assert_called_once_with(
self.service._impl.create_volume.return_value)
def test_list_volumes(self):
self.service._unify_volume = mock.MagicMock()
self.service._impl.list_volumes.return_value = ["vol"]
self.assertEqual([self.service._unify_volume.return_value],
self.service.list_volumes(detailed=True))
self.service._impl.list_volumes.assert_called_once_with(detailed=True)
self.service._unify_volume.assert_called_once_with("vol")
def test_get_volume(self):
self.service._unify_volume = mock.MagicMock()
self.assertEqual(self.service._unify_volume.return_value,
self.service.get_volume(1))
self.service._impl.get_volume.assert_called_once_with(1)
self.service._unify_volume.assert_called_once_with(
self.service._impl.get_volume.return_value)
def test_extend_volume(self):
self.service._unify_volume = mock.MagicMock()
self.assertEqual(self.service._unify_volume.return_value,
self.service.extend_volume("volume", new_size=1))
self.service._impl.extend_volume.assert_called_once_with("volume",
new_size=1)
self.service._unify_volume.assert_called_once_with(
self.service._impl.extend_volume.return_value)
def test_update_volume(self):
self.service._unify_volume = mock.MagicMock()
self.assertEqual(
self.service._unify_volume.return_value,
self.service.update_volume(1, name="volume",
description="fake"))
self.service._impl.update_volume.assert_called_once_with(
1, display_description="fake", display_name="volume")
self.service._unify_volume.assert_called_once_with(
self.service._impl.update_volume.return_value)
def test_list_types(self):
self.assertEqual(
self.service._impl.list_types.return_value,
self.service.list_types(search_opts=None))
self.service._impl.list_types.assert_called_once_with(
search_opts=None)
def test_create_snapshot(self):
self.service._unify_snapshot = mock.MagicMock()
self.assertEqual(
self.service._unify_snapshot.return_value,
self.service.create_snapshot(1, force=False,
name=None,
description=None))
self.service._impl.create_snapshot.assert_called_once_with(
1, force=False, display_name=None, display_description=None)
self.service._unify_snapshot.assert_called_once_with(
self.service._impl.create_snapshot.return_value)
def test_list_snapshots(self):
    """list_snapshots unifies every snapshot returned by the impl."""
    unify = mock.MagicMock()
    self.service._unify_snapshot = unify
    self.service._impl.list_snapshots.return_value = ["snapshot"]
    returned = self.service.list_snapshots(detailed=True)
    self.assertEqual([unify.return_value], returned)
    self.service._impl.list_snapshots.assert_called_once_with(detailed=True)
    unify.assert_called_once_with("snapshot")
def test_create_backup(self):
    """create_backup delegates to the impl and unifies the result.

    Bug fix: the original last statement *called* the mock
    (``self.service._unify_backup(...)``, which on a MagicMock is a
    silent no-op) instead of asserting on it, so the delegation to
    ``_unify_backup`` was never actually verified.  It now uses
    ``assert_called_once_with`` like the sibling tests.
    """
    self.service._unify_backup = mock.MagicMock()
    self.assertEqual(
        self.service._unify_backup.return_value,
        self.service.create_backup(1, container=None,
                                   name=None,
                                   description=None))
    self.service._impl.create_backup.assert_called_once_with(
        1, container=None, name=None, description=None)
    self.service._unify_backup.assert_called_once_with(
        self.service._impl.create_backup.return_value)
def test_create_volume_type(self):
    """create_volume_type is a straight pass-through to the impl."""
    returned = self.service.create_volume_type(name="type")
    self.assertEqual(self.service._impl.create_volume_type.return_value,
                     returned)
    self.service._impl.create_volume_type.assert_called_once_with(
        name="type")
def test_restore_backup(self):
    """restore_backup delegates to the impl and unifies the volume."""
    unify = mock.MagicMock()
    self.service._unify_volume = unify
    returned = self.service.restore_backup(1, volume_id=1)
    self.assertEqual(unify.return_value, returned)
    self.service._impl.restore_backup.assert_called_once_with(1, volume_id=1)
    unify.assert_called_once_with(
        self.service._impl.restore_backup.return_value)
| {
"repo_name": "yeming233/rally",
"path": "tests/unit/plugins/openstack/services/storage/test_cinder_v1.py",
"copies": "1",
"size": "15980",
"license": "apache-2.0",
"hash": 5436814135956243000,
"line_mean": 44.014084507,
"line_max": 78,
"alpha_frac": 0.6107634543,
"autogenerated": false,
"ratio": 4.050697084917617,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 355
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from rally.plugins.openstack.context.monasca import metrics
from rally.plugins.openstack.scenarios.monasca import utils as monasca_utils
from tests.unit import test
CTX = "rally.plugins.openstack.context.monasca"
class MonascaMetricGeneratorTestCase(test.TestCase):
    """Unit tests for the Monasca metrics context generator."""

    def _gen_tenants(self, count):
        # Build ``count`` fake tenants keyed and named by their stringified
        # index, shaped like the output of the "users" context.
        tenants = {}
        for id in six.moves.range(count):
            tenants[str(id)] = {"name": str(id)}
        return tenants

    def _gen_context(self, tenants_count, users_per_tenant,
                     metrics_per_tenant):
        # Assemble a complete task context: fake tenants/users plus the
        # "monasca_metrics" config section the generator under test reads.
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id in tenants.keys():
            for i in six.moves.range(users_per_tenant):
                users.append({"id": i, "tenant_id": id,
                              "endpoint": mock.MagicMock()})
        context = test.get_test_context()
        context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "monasca_metrics": {
                    "name": "fake-metric-name",
                    "dimensions": {
                        "region": "fake-region",
                        "service": "fake-identity",
                        "hostname": "fake-hostname",
                        "url": "fake-url"
                    },
                    "metrics_per_tenant": metrics_per_tenant,
                },
                "roles": [
                    "monasca-user"
                ]
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        return tenants, context

    @mock.patch("%s.metrics.rutils.interruptable_sleep" % CTX)
    @mock.patch("%s.metrics.monasca_utils.MonascaScenario" % CTX)
    def test_setup(self, mock_monasca_scenario, mock_interruptable_sleep):
        """setup() creates metrics per tenant and sleeps between requests."""
        tenants_count = 2
        users_per_tenant = 4
        metrics_per_tenant = 5
        tenants, real_context = self._gen_context(
            tenants_count, users_per_tenant, metrics_per_tenant)
        monasca_ctx = metrics.MonascaMetricGenerator(real_context)
        monasca_ctx.setup()
        self.assertEqual(tenants_count, mock_monasca_scenario.call_count,
                         "Scenario should be constructed same times as "
                         "number of tenants")
        self.assertEqual(metrics_per_tenant * tenants_count,
                         mock_monasca_scenario.return_value._create_metrics.
                         call_count,
                         "Total number of metrics created should be tenant"
                         "counts times metrics per tenant")
        # One short sleep per created metric, then a single longer pre-poll
        # delay; comparing the whole call_args_list also pins the ordering.
        first_call = mock.call(0.001)
        second_call = mock.call(monasca_utils.CONF.benchmark.
                                monasca_metric_create_prepoll_delay,
                                atomic_delay=1)
        self.assertEqual([first_call] * metrics_per_tenant * tenants_count +
                         [second_call],
                         mock_interruptable_sleep.call_args_list,
                         "Method interruptable_sleep should be called "
                         "tenant counts times metrics plus one")
| {
"repo_name": "varuntiwari27/rally",
"path": "tests/unit/plugins/openstack/context/monasca/test_metrics.py",
"copies": "4",
"size": "3936",
"license": "apache-2.0",
"hash": -3054166805852852700,
"line_mean": 38.36,
"line_max": 76,
"alpha_frac": 0.5505589431,
"autogenerated": false,
"ratio": 4.282916213275299,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6833475156375299,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.tests import base
class SecurityGroupServerRpcApiTestCase(base.BaseTestCase):
    """Exercise the server-side security group RPC API proxy."""

    def test_security_group_rules_for_devices(self):
        """The RPC call carries the expected method name and arguments."""
        rpcapi = securitygroups_rpc.SecurityGroupServerRpcApi('fake_topic')
        patch_call = mock.patch.object(rpcapi.client, 'call')
        patch_prepare = mock.patch.object(rpcapi.client, 'prepare')
        with patch_call as rpc_mock, patch_prepare as prepare_mock:
            # prepare() normally returns a narrowed client; hand back the
            # same client so the call is recorded on rpc_mock.
            prepare_mock.return_value = rpcapi.client
            rpcapi.security_group_rules_for_devices('context', ['fake_device'])
        rpc_mock.assert_called_once_with(
            'context',
            'security_group_rules_for_devices',
            devices=['fake_device'])
class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase):
    """Verify agent-side callbacks fan out to the security group agent."""

    def setUp(self):
        super(SGAgentRpcCallBackMixinTestCase, self).setUp()
        self.rpc = securitygroups_rpc.SecurityGroupAgentRpcCallbackMixin()
        self.rpc.sg_agent = mock.Mock()

    def test_security_groups_rule_updated(self):
        """A rule update is forwarded to sg_agent with the group ids."""
        self.rpc.security_groups_rule_updated(
            None, security_groups=['fake_sgid'])
        expected = [mock.call.security_groups_rule_updated(['fake_sgid'])]
        self.rpc.sg_agent.assert_has_calls(expected)

    def test_security_groups_member_updated(self):
        """A membership update is forwarded to sg_agent with the group ids."""
        self.rpc.security_groups_member_updated(
            None, security_groups=['fake_sgid'])
        expected = [mock.call.security_groups_member_updated(['fake_sgid'])]
        self.rpc.sg_agent.assert_has_calls(expected)

    def test_security_groups_provider_updated(self):
        """A provider update is forwarded to sg_agent unchanged."""
        self.rpc.security_groups_provider_updated(None)
        expected = [mock.call.security_groups_provider_updated(None)]
        self.rpc.sg_agent.assert_has_calls(expected)
| {
"repo_name": "sasukeh/neutron",
"path": "neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py",
"copies": "39",
"size": "2417",
"license": "apache-2.0",
"hash": 4242682656836666000,
"line_mean": 39.9661016949,
"line_max": 79,
"alpha_frac": 0.65866777,
"autogenerated": false,
"ratio": 4.035058430717863,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 59
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.benchmark.context.sahara import sahara_edp
from tests.unit import test
BASE_CTX = "rally.benchmark.context"
CTX = "rally.benchmark.context.sahara"
SCN = "rally.benchmark.scenarios"
class SaharaEDPTestCase(test.TestCase):
    """Unit tests for the Sahara EDP benchmark context."""

    def setUp(self):
        super(SaharaEDPTestCase, self).setUp()
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()
        # Fake tenants keyed by stringified index, each pre-seeded with a
        # sahara image id; users_key holds the matching per-tenant users.
        self.tenants = {}
        self.users_key = []
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara_image": "42"}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "endpoint": "endpoint"})
        # NOTE(review): user_key is built but never read by the test below;
        # presumably leftover -- confirm before removing.
        self.user_key = [{"id": i, "tenant_id": j, "endpoint": "endpoint"}
                         for j in range(self.tenants_num)
                         for i in range(self.users_per_tenant)]

    @property
    def context_without_edp_keys(self):
        # A complete task context carrying the "sahara_edp" config section
        # but no pre-created EDP resources.
        return {
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_edp": {
                    "input_type": "hdfs",
                    "output_type": "hdfs",
                    "input_url": "hdfs://test_host/",
                    "output_url_prefix": "hdfs://test_host/out_",
                    "libs": [
                        {
                            "name": "test.jar",
                            "download_url": "http://example.com/test.jar"
                        }
                    ]
                },
            },
            "admin": {"endpoint": mock.MagicMock()},
            "task": mock.MagicMock(),
            "users": self.users_key,
            "tenants": self.tenants
        }

    @mock.patch("%s.sahara_edp.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_edp.requests" % CTX)
    @mock.patch("%s.sahara_edp.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients, mock_requests,
                               mock_cleanup):
        """setup() creates per-tenant EDP resources; cleanup() removes them."""
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        mock_sahara.data_sources.create.return_value = mock.MagicMock(id=42)
        mock_sahara.job_binary_internals.create.return_value = (
            mock.MagicMock(id=42))
        mock_requests.get().content = "test_binary"
        ctx = self.context_without_edp_keys
        sahara_ctx = sahara_edp.SaharaEDP(ctx)
        # Expected mock calls, one batch per tenant.
        input_ds_crete_calls = []
        download_calls = []
        job_binary_internals_calls = []
        job_binaries_calls = []
        for i in range(self.tenants_num):
            input_ds_crete_calls.append(mock.call(
                name="input_ds", description="",
                data_source_type="hdfs",
                url="hdfs://test_host/"))
            download_calls.append(mock.call("http://example.com/test.jar"))
            job_binary_internals_calls.append(mock.call(
                name="test.jar",
                data="test_binary"))
            job_binaries_calls.append(mock.call(
                name="test.jar",
                url="internal-db://42",
                description="",
                extra={}))
        sahara_ctx.setup()
        mock_sahara.data_sources.create.assert_has_calls(input_ds_crete_calls)
        mock_requests.get.assert_has_calls(download_calls)
        mock_sahara.job_binary_internals.create.assert_has_calls(
            job_binary_internals_calls)
        mock_sahara.job_binaries.create.assert_has_calls(job_binaries_calls)
        sahara_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["sahara.job_executions", "sahara.jobs",
                   "sahara.job_binary_internals", "sahara.job_binaries",
                   "sahara.data_sources"],
            users=ctx["users"])
| {
"repo_name": "pandeyop/rally",
"path": "tests/unit/benchmark/context/sahara/test_sahara_edp.py",
"copies": "2",
"size": "4688",
"license": "apache-2.0",
"hash": -5845531631222526000,
"line_mean": 36.2063492063,
"line_max": 78,
"alpha_frac": 0.5407423208,
"autogenerated": false,
"ratio": 3.8552631578947367,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 126
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context import fuel
from tests.unit import test
BASE = "rally.plugins.openstack.context.fuel"
class FuelEnvGeneratorTestCase(test.TestCase):
    """Unit tests for the Fuel environments context generator."""

    @mock.patch(BASE + ".FuelEnvGenerator._create_envs",
                return_value=["env1"])
    @mock.patch(BASE + ".fuel_utils.FuelScenario")
    def test_setup(self, mock_fuel_scenario, mock__create_envs):
        """setup() stores created environment ids under context["fuel"]."""
        context = {}
        context["config"] = {"fuel_environments": {"environments": 1}}
        context["task"] = {"uuid": "some_uuid"}
        context["admin"] = {"credential": "some_credential"}
        env_ctx = fuel.FuelEnvGenerator(context)
        env_ctx.setup()
        self.assertIn("fuel", env_ctx.context)
        self.assertIn("environments", env_ctx.context["fuel"])
        mock__create_envs.assert_called_once_with()
        mock_fuel_scenario.assert_called_once_with(context)

    @mock.patch(BASE + ".FuelEnvGenerator._create_envs",
                return_value=["env1"])
    @mock.patch(BASE + ".fuel_utils.FuelScenario")
    def test_setup_error(self, mock_fuel_scenario, mock__create_envs):
        # _create_envs yields one env while the config requests five;
        # setup() must surface that as a ContextSetupFailure.
        context = {}
        context["config"] = {"fuel_environments": {"environments": 5}}
        context["task"] = {"uuid": "some_uuid"}
        context["admin"] = {"credential": "some_credential"}
        env_ctx = fuel.FuelEnvGenerator(context)
        self.assertRaises(exceptions.ContextSetupFailure, env_ctx.setup)

    def test__create_envs(self):
        """_create_envs() creates one environment per configured count."""
        config = {"environments": 4,
                  "release_id": 42,
                  "network_provider": "provider",
                  "deployment_mode": "mode",
                  "net_segment_type": "type",
                  "resource_management_workers": 3}
        context = {"task": {},
                   "config": {"fuel_environments": config}}
        env_ctx = fuel.FuelEnvGenerator(context)
        env_ctx.fscenario = mock.Mock()
        # NOTE(review): the return value is configured on
        # fscenario.return_value while the assertion below inspects
        # fscenario._create_environment directly; one of the two chains
        # looks dead -- confirm against the context implementation.
        env_ctx.fscenario.return_value._create_environment.return_value = "id"
        self.assertEqual(config["environments"], len(env_ctx._create_envs()))
        # "environments" and "resource_management_workers" are consumed by
        # the generator itself; the remaining keys are forwarded verbatim.
        enves = config.pop("environments")
        config.pop("resource_management_workers")
        exp_calls = [mock.call(**config) for i in range(enves)]
        self.assertEqual(
            exp_calls,
            env_ctx.fscenario._create_environment.mock_calls)

    def test__delete_envs(self):
        """_delete_envs() clears the stored environment ids."""
        config = {"release_id": 42,
                  "network_provider": "provider",
                  "deployment_mode": "mode",
                  "net_segment_type": "type",
                  "resource_management_workers": 3}
        context = {"task": {},
                   "config": {"fuel_environments": config},
                   "fuel": {"environments": ["id", "id", "id"]}}
        env_ctx = fuel.FuelEnvGenerator(context)
        env_ctx.fscenario = mock.Mock()
        env_ctx._delete_envs()
        self.assertEqual({}, context["fuel"])

    def test_cleanup(self):
        """cleanup() delegates environment removal to _delete_envs()."""
        config = {"release_id": 42,
                  "network_provider": "provider",
                  "deployment_mode": "mode",
                  "net_segment_type": "type",
                  "resource_management_workers": 3}
        context = {"task": {"uuid": "some_id"},
                   "config": {"fuel_environments": config},
                   "fuel": {"environments": ["id", "id", "id"]}}
        env_ctx = fuel.FuelEnvGenerator(context)
        env_ctx._delete_envs = mock.Mock()
        env_ctx.cleanup()
        env_ctx._delete_envs.assert_called_once_with()
| {
"repo_name": "amit0701/rally",
"path": "tests/unit/plugins/openstack/context/test_fuel.py",
"copies": "1",
"size": "4168",
"license": "apache-2.0",
"hash": -5473221961595771000,
"line_mean": 37.5925925926,
"line_max": 78,
"alpha_frac": 0.5899712092,
"autogenerated": false,
"ratio": 3.8953271028037384,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49852983120037386,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.context.sahara import sahara_image
from tests.unit import test
BASE_CTX = "rally.task.context"
CTX = "rally.plugins.openstack.context.sahara.sahara_image"
BASE_SCN = "rally.task.scenarios"
SCN = "rally.plugins.openstack.scenarios"
class SaharaImageTestCase(test.ScenarioTestCase):
    """Unit tests for the Sahara image context.

    Bug fix over the original: the last two tests asserted through the
    misspelled ``asser_called_once_with``, which on a Mock is just another
    auto-created attribute call, so those checks silently never ran.  They
    now use ``assert_called_once_with`` on the client returned by the
    patched ``create_client`` -- the same ``return_value.images.get`` chain
    that test_check_existing_private_image_fail configures.
    """

    def setUp(self):
        super(SaharaImageTestCase, self).setUp()
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()
        # Fake tenants keyed by stringified index, each already carrying a
        # sahara image id, plus the matching per-tenant users.
        self.tenants = {}
        self.users_key = []
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara": {"image": "42"}}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "credential": mock.MagicMock()})

    @property
    def url_image_context(self):
        # Context configured to register the image from a remote URL.
        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_image": {
                    "image_url": "http://somewhere",
                    "plugin_name": "test_plugin",
                    "hadoop_version": "test_version",
                    "username": "test_user"
                }
            },
            "admin": {"credential": mock.MagicMock()},
            "users": self.users_key,
            "tenants": self.tenants
        })
        return self.context

    @property
    def existing_image_context(self):
        # Context configured to reuse an already-registered image.
        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_image": {
                    "image_uuid": "some_id"
                }
            },
            "admin": {"credential": mock.MagicMock()},
            "users": self.users_key,
            "tenants": self.tenants,
        })
        return self.context

    @mock.patch("rally.plugins.openstack.services."
                "image.image.Image")
    @mock.patch("%s.resource_manager.cleanup" % CTX)
    @mock.patch("rally.osclients.Clients")
    def test_setup_and_cleanup_url_image(self, mock_clients,
                                         mock_cleanup, mock_image):
        """setup() uploads and tags an image per tenant; cleanup() removes."""
        ctx = self.url_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx.generate_random_name = mock.Mock()
        image_service = mock.Mock()
        mock_image.return_value = image_service
        image_service.create_image.return_value = mock.Mock(id=42)
        clients = mock.Mock()
        mock_clients.return_value = clients
        sahara_client = mock.Mock()
        clients.sahara.return_value = sahara_client
        glance_calls = []
        for i in range(self.tenants_num):
            glance_calls.append(
                mock.call(container_format="bare",
                          image_location="http://somewhere",
                          disk_format="qcow2"))
        sahara_update_image_calls = []
        sahara_update_tags_calls = []
        for i in range(self.tenants_num):
            sahara_update_image_calls.append(mock.call(image_id=42,
                                                       user_name="test_user",
                                                       desc=""))
            sahara_update_tags_calls.append(mock.call(
                image_id=42,
                new_tags=["test_plugin", "test_version"]))
        sahara_ctx.setup()
        image_service.create_image.assert_has_calls(glance_calls)
        sahara_client.images.update_image.assert_has_calls(
            sahara_update_image_calls)
        sahara_client.images.update_tags.assert_has_calls(
            sahara_update_tags_calls)
        sahara_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["glance.images"],
            users=ctx["users"],
            superclass=sahara_ctx.__class__,
            task_id=ctx["owner_id"])

    @mock.patch("%s.glance.utils.GlanceScenario._create_image" % SCN,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.resource_manager.cleanup" % CTX)
    @mock.patch("%s.osclients.Clients" % CTX)
    def test_setup_and_cleanup_existing_image(
            self, mock_clients, mock_cleanup,
            mock_glance_scenario__create_image):
        """With image_uuid configured no image is created or cleaned up."""
        # NOTE(review): this configures an attribute on the patched Clients
        # *class mock* rather than on its return_value, so the code under
        # test likely never sees it; the test passes anyway -- confirm.
        mock_clients.glance.images.get.return_value = mock.MagicMock(
            is_public=True)
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx.setup()
        for tenant_id in sahara_ctx.context["tenants"]:
            image_id = (
                sahara_ctx.context["tenants"][tenant_id]["sahara"]["image"])
            self.assertEqual("some_id", image_id)
        self.assertFalse(mock_glance_scenario__create_image.called)
        sahara_ctx.cleanup()
        self.assertFalse(mock_cleanup.called)

    @mock.patch("%s.osclients.Glance.create_client" % CTX)
    def test_check_existing_image(self, mock_glance_create_client):
        """setup() looks the configured image up through the Glance client."""
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx.setup()
        # Fixed: "asser_called_once_with" typo made this a no-op; assert on
        # the client returned by create_client().
        images_get = mock_glance_create_client.return_value.images.get
        images_get.assert_called_once_with("some_id")

    @mock.patch("%s.osclients.Glance.create_client" % CTX)
    def test_check_existing_private_image_fail(self,
                                               mock_glance_create_client):
        """A private pre-existing image makes setup() fail."""
        mock_glance_create_client.return_value.images.get.return_value = (
            mock.MagicMock(is_public=False))
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        self.assertRaises(exceptions.ContextSetupFailure,
                          sahara_ctx.setup)
        # Fixed: "asser_called_once_with" typo made this a no-op.
        images_get = mock_glance_create_client.return_value.images.get
        images_get.assert_called_once_with("some_id")
| {
"repo_name": "yeming233/rally",
"path": "tests/unit/plugins/openstack/context/sahara/test_sahara_image.py",
"copies": "1",
"size": "6755",
"license": "apache-2.0",
"hash": -823893245456700900,
"line_mean": 35.7119565217,
"line_max": 78,
"alpha_frac": 0.5643227239,
"autogenerated": false,
"ratio": 3.9068825910931175,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971205314993118,
"avg_score": 0,
"num_lines": 184
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.scenarios.ceilometer import events
from tests.unit import test
class CeilometerEventsTestCase(test.ScenarioTestCase):
    """Scenarios that create a user and then query Ceilometer events."""

    def setUp(self):
        super(CeilometerEventsTestCase, self).setUp()
        identity_patch = mock.patch(
            "rally.plugins.openstack.services.identity.identity.Identity")
        self.addCleanup(identity_patch.stop)
        self.mock_identity = identity_patch.start()

    def get_test_context(self):
        """Extend the base scenario context with fake admin credentials."""
        context = super(CeilometerEventsTestCase, self).get_test_context()
        context["admin"] = {"id": "fake_user_id",
                            "credential": mock.MagicMock()
                            }
        return context

    def _assert_user_created(self):
        # Every scenario below must create exactly one user first.
        self.mock_identity.return_value.create_user.assert_called_once_with()

    def test_list_events(self):
        """Events are listed after the user is created."""
        scenario = events.CeilometerEventsCreateUserAndListEvents(self.context)
        scenario._list_events = mock.MagicMock()
        scenario.run()
        self._assert_user_created()
        scenario._list_events.assert_called_once_with()

    def test_list_events_fails(self):
        """An empty event list is reported as a RallyException."""
        scenario = events.CeilometerEventsCreateUserAndListEvents(self.context)
        scenario._list_events = mock.MagicMock(return_value=[])
        self.assertRaises(exceptions.RallyException, scenario.run)
        self._assert_user_created()
        scenario._list_events.assert_called_once_with()

    def test_list_event_types(self):
        """Event types are listed after the user is created."""
        scenario = events.CeilometerEventsCreateUserAndListEventTypes(
            self.context)
        scenario._list_event_types = mock.MagicMock()
        scenario.run()
        self._assert_user_created()
        scenario._list_event_types.assert_called_once_with()

    def test_list_event_types_fails(self):
        """An empty event type list is reported as a RallyException."""
        scenario = events.CeilometerEventsCreateUserAndListEventTypes(
            self.context)
        scenario._list_event_types = mock.MagicMock(return_value=[])
        self.assertRaises(exceptions.RallyException, scenario.run)
        self._assert_user_created()
        scenario._list_event_types.assert_called_once_with()

    def test_get_event(self):
        """The first listed event is fetched by its message id."""
        scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context)
        scenario._get_event = mock.MagicMock()
        scenario._list_events = mock.MagicMock(
            return_value=[mock.Mock(message_id="fake_id")])
        scenario.run()
        self._assert_user_created()
        scenario._list_events.assert_called_with()
        scenario._get_event.assert_called_with(event_id="fake_id")

    def test_get_event_fails(self):
        """With no events to fetch, run() raises and _get_event is skipped."""
        scenario = events.CeilometerEventsCreateUserAndGetEvent(self.context)
        scenario._list_events = mock.MagicMock(return_value=[])
        scenario._get_event = mock.MagicMock()
        self.assertRaises(exceptions.RallyException, scenario.run)
        self._assert_user_created()
        scenario._list_events.assert_called_with()
        self.assertFalse(scenario._get_event.called)
| {
"repo_name": "yeming233/rally",
"path": "tests/unit/plugins/openstack/scenarios/ceilometer/test_events.py",
"copies": "1",
"size": "3763",
"license": "apache-2.0",
"hash": 4287212602965468000,
"line_mean": 35.5339805825,
"line_max": 79,
"alpha_frac": 0.6874833909,
"autogenerated": false,
"ratio": 4.016008537886873,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 103
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.cinder import volume_types
from tests.unit import test
CTX = "rally.plugins.openstack.context.cinder.volume_types"
SERVICE = "rally.plugins.openstack.services.storage"
class VolumeTypeGeneratorTestCase(test.ContextTestCase):
    """Tests for the volume_types context generator."""

    def setUp(self):
        super(VolumeTypeGeneratorTestCase, self).setUp()
        self.context.update({"admin": {"credential": "admin_creds"}})

    @mock.patch("%s.block.BlockStorage" % SERVICE)
    def test_setup(self, mock_block_storage):
        """Each configured type is created and its id/name pair recorded."""
        self.context.update({"config": {"volume_types": ["foo", "bar"]}})
        storage = mock_block_storage.return_value
        storage.create_volume_type.side_effect = (
            mock.Mock(id="foo-id"), mock.Mock(id="bar-id"))
        vtype_ctx = volume_types.VolumeTypeGenerator(self.context)
        vtype_ctx.setup()
        expected_calls = [mock.call("foo"), mock.call("bar")]
        storage.create_volume_type.assert_has_calls(expected_calls)
        expected_types = [{"id": "foo-id", "name": "foo"},
                          {"id": "bar-id", "name": "bar"}]
        self.assertEqual(self.context["volume_types"], expected_types)

    @mock.patch("%s.utils.make_name_matcher" % CTX)
    @mock.patch("%s.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup, mock_make_name_matcher):
        """cleanup() removes the created types via a name matcher."""
        api_versions = {"cinder": {"version": 2,
                                   "service_type": "volumev2"}}
        self.context.update({
            "config": {"volume_types": ["foo", "bar"],
                       "api_versions": api_versions}})
        vtype_ctx = volume_types.VolumeTypeGenerator(self.context)
        vtype_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["cinder.volume_types"],
            admin=self.context["admin"],
            api_versions=self.context["config"]["api_versions"],
            superclass=mock_make_name_matcher.return_value,
            task_id=vtype_ctx.get_owner_id())
        mock_make_name_matcher.assert_called_once_with("foo", "bar")
| {
"repo_name": "yeming233/rally",
"path": "tests/unit/plugins/openstack/context/cinder/test_volume_types.py",
"copies": "1",
"size": "2601",
"license": "apache-2.0",
"hash": 5458098941993561000,
"line_mean": 39.0153846154,
"line_max": 75,
"alpha_frac": 0.6324490581,
"autogenerated": false,
"ratio": 3.8193832599118944,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9951832318011894,
"avg_score": 0,
"num_lines": 65
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.quotas import cinder_quotas
from tests.unit import test
class CinderQuotasTestCase(test.TestCase):
    """Tests for the Cinder quotas helper used by the quotas context."""

    def test_update(self):
        """update() forwards the tenant id and quota kwargs to cinder."""
        clients = mock.MagicMock()
        quota_helper = cinder_quotas.CinderQuotas(clients)
        tenant_id = mock.MagicMock()
        new_values = {
            "volumes": 10,
            "snapshots": 50,
            "gigabytes": 1000
        }
        quota_helper.update(tenant_id, **new_values)
        clients.cinder().quotas.update.assert_called_once_with(
            tenant_id, **new_values)

    def test_delete(self):
        """delete() forwards the tenant id to cinder's quota delete."""
        clients = mock.MagicMock()
        quota_helper = cinder_quotas.CinderQuotas(clients)
        tenant_id = mock.MagicMock()
        quota_helper.delete(tenant_id)
        clients.cinder().quotas.delete.assert_called_once_with(tenant_id)

    def test_get(self):
        """get() reduces the quota set object to a plain dict."""
        tenant_id = "tenant_id"
        expected = {"gigabytes": "gb", "snapshots": "ss", "volumes": "v"}
        quota_set = mock.MagicMock(**expected)
        clients = mock.MagicMock()
        clients.cinder.return_value.quotas.get.return_value = quota_set
        quota_helper = cinder_quotas.CinderQuotas(clients)
        self.assertEqual(expected, quota_helper.get(tenant_id))
        clients.cinder().quotas.get.assert_called_once_with(tenant_id)
| {
"repo_name": "gluke77/rally",
"path": "tests/unit/plugins/openstack/context/quotas/test_cinder_quotas.py",
"copies": "3",
"size": "1954",
"license": "apache-2.0",
"hash": 3398495095937224700,
"line_mean": 36.5769230769,
"line_max": 78,
"alpha_frac": 0.6632548618,
"autogenerated": false,
"ratio": 3.779497098646035,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 52
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.context.sahara import sahara_edp
from tests.unit import test
CTX = "rally.plugins.openstack.context.sahara"
class SaharaEDPTestCase(test.TestCase):
    """Unit tests for the Sahara EDP context plugin."""

    def setUp(self):
        super(SaharaEDPTestCase, self).setUp()
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()
        # Fake tenants keyed by stringified index, each pre-seeded with a
        # sahara image id, and the matching per-tenant users.
        self.tenants = {
            str(tenant): {"id": str(tenant), "name": str(tenant),
                          "sahara_image": "42"}
            for tenant in range(self.tenants_num)}
        self.users_key = [
            {"id": "%s_%s" % (str(tenant), str(user)),
             "tenant_id": str(tenant),
             "endpoint": "endpoint"}
            for tenant in range(self.tenants_num)
            for user in range(self.users_per_tenant)]
        self.user_key = [{"id": i, "tenant_id": j, "endpoint": "endpoint"}
                         for j in range(self.tenants_num)
                         for i in range(self.users_per_tenant)]

    @property
    def context_without_edp_keys(self):
        # A complete task context carrying the "sahara_edp" config section
        # but no pre-created EDP resources.
        return {
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_edp": {
                    "input_type": "hdfs",
                    "output_type": "hdfs",
                    "input_url": "hdfs://test_host/",
                    "output_url_prefix": "hdfs://test_host/out_",
                    "libs": [
                        {
                            "name": "test.jar",
                            "download_url": "http://example.com/test.jar"
                        }
                    ]
                },
            },
            "admin": {"endpoint": mock.MagicMock()},
            "task": mock.MagicMock(),
            "users": self.users_key,
            "tenants": self.tenants
        }

    @mock.patch("%s.sahara_edp.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_edp.requests" % CTX)
    @mock.patch("%s.sahara_edp.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients, mock_requests,
                               mock_cleanup):
        """setup() creates per-tenant EDP resources; cleanup() removes them."""
        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        mock_sahara.data_sources.create.return_value = mock.MagicMock(id=42)
        mock_sahara.job_binary_internals.create.return_value = (
            mock.MagicMock(id=42))
        mock_requests.get().content = "test_binary"
        ctx = self.context_without_edp_keys
        sahara_ctx = sahara_edp.SaharaEDP(ctx)
        # Expected mock calls, one of each per tenant.
        tenant_range = range(self.tenants_num)
        expected_ds_calls = [
            mock.call(name="input_ds", description="",
                      data_source_type="hdfs",
                      url="hdfs://test_host/")
            for _ in tenant_range]
        expected_downloads = [
            mock.call("http://example.com/test.jar") for _ in tenant_range]
        expected_internals = [
            mock.call(name="test.jar", data="test_binary")
            for _ in tenant_range]
        expected_binaries = [
            mock.call(name="test.jar",
                      url="internal-db://42",
                      description="",
                      extra={})
            for _ in tenant_range]
        sahara_ctx.setup()
        mock_sahara.data_sources.create.assert_has_calls(expected_ds_calls)
        mock_requests.get.assert_has_calls(expected_downloads)
        mock_sahara.job_binary_internals.create.assert_has_calls(
            expected_internals)
        mock_sahara.job_binaries.create.assert_has_calls(expected_binaries)
        sahara_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["sahara.job_executions", "sahara.jobs",
                   "sahara.job_binary_internals", "sahara.job_binaries",
                   "sahara.data_sources"],
            users=ctx["users"])
| {
"repo_name": "go-bears/rally",
"path": "tests/unit/plugins/openstack/context/sahara/test_sahara_edp.py",
"copies": "5",
"size": "4633",
"license": "apache-2.0",
"hash": -7455143277367836000,
"line_mean": 36.3629032258,
"line_max": 78,
"alpha_frac": 0.5385279517,
"autogenerated": false,
"ratio": 3.876987447698745,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 124
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.ceilometer import alarms
from tests.unit import test
class CeilometerAlarmsTestCase(test.ScenarioTestCase):
    """Unit tests for the Ceilometer alarm scenarios."""

    def test_create_alarm(self):
        scenario = alarms.CreateAlarm(self.context)
        scenario._create_alarm = mock.MagicMock()

        scenario.run("fake_meter_name", "fake_threshold", fakearg="f")

        scenario._create_alarm.assert_called_once_with(
            "fake_meter_name", "fake_threshold", {"fakearg": "f"})

    def test_list_alarm(self):
        scenario = alarms.ListAlarms(self.context)
        scenario._list_alarms = mock.MagicMock()

        scenario.run()

        scenario._list_alarms.assert_called_once_with()

    def test_create_and_list_alarm(self):
        mocked_alarm = mock.MagicMock()
        scenario = alarms.CreateAndListAlarm(self.context)
        scenario._create_alarm = mock.MagicMock(return_value=mocked_alarm)
        scenario._list_alarms = mock.MagicMock()

        scenario.run("fake_meter_name", "fake_threshold", fakearg="f")

        scenario._create_alarm.assert_called_once_with(
            "fake_meter_name", "fake_threshold", {"fakearg": "f"})
        scenario._list_alarms.assert_called_once_with(mocked_alarm.alarm_id)

    def test_create_and_update_alarm(self):
        expected_diff = {"description": "Changed Test Description"}
        mocked_alarm = mock.MagicMock()
        scenario = alarms.CreateAndUpdateAlarm(self.context)
        scenario._create_alarm = mock.MagicMock(return_value=mocked_alarm)
        scenario._update_alarm = mock.MagicMock()

        scenario.run("fake_meter_name", "fake_threshold", fakearg="f")

        scenario._create_alarm.assert_called_once_with(
            "fake_meter_name", "fake_threshold", {"fakearg": "f"})
        scenario._update_alarm.assert_called_once_with(
            mocked_alarm.alarm_id, expected_diff)

    def test_create_and_delete_alarm(self):
        mocked_alarm = mock.MagicMock()
        scenario = alarms.CreateAndDeleteAlarm(self.context)
        scenario._create_alarm = mock.MagicMock(return_value=mocked_alarm)
        scenario._delete_alarm = mock.MagicMock()

        scenario.run("fake_meter_name", "fake_threshold", fakearg="f")

        scenario._create_alarm.assert_called_once_with(
            "fake_meter_name", "fake_threshold", {"fakearg": "f"})
        scenario._delete_alarm.assert_called_once_with(mocked_alarm.alarm_id)

    def test_create_and_get_alarm_history(self):
        mocked_alarm = mock.Mock(alarm_id="foo_id")
        scenario = alarms.CreateAlarmAndGetHistory(self.context)
        scenario._create_alarm = mock.MagicMock(return_value=mocked_alarm)
        scenario._get_alarm_state = mock.MagicMock()
        scenario._get_alarm_history = mock.MagicMock()
        scenario._set_alarm_state = mock.MagicMock()

        scenario.run("meter_name", "threshold", "state", 60, fakearg="f")

        scenario._create_alarm.assert_called_once_with(
            "meter_name", "threshold", {"fakearg": "f"})
        scenario._get_alarm_state.assert_called_once_with("foo_id")
        scenario._get_alarm_history.assert_called_once_with("foo_id")
        scenario._set_alarm_state.assert_called_once_with(
            mocked_alarm, "state", 60)
| {
"repo_name": "vganapath/rally",
"path": "tests/unit/plugins/openstack/scenarios/ceilometer/test_alarms.py",
"copies": "1",
"size": "4256",
"license": "apache-2.0",
"hash": 792654294713155200,
"line_mean": 46.2888888889,
"line_max": 78,
"alpha_frac": 0.6007988722,
"autogenerated": false,
"ratio": 4.132038834951456,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5232837707151455,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.ec2 import servers
from tests.unit import test
class EC2ServersTestCase(test.ScenarioTestCase):
    """Unit tests for the EC2 server scenarios."""

    def test_list_servers(self):
        # Pass self.context for consistency with test_boot_server; the
        # original instantiated the scenario without a context.
        scenario = servers.EC2Servers(self.context)
        scenario._list_servers = mock.MagicMock()
        scenario.list_servers()
        scenario._list_servers.assert_called_once_with()

    def test_boot_server(self):
        scenario = servers.EC2Servers(self.context)
        scenario._boot_servers = mock.Mock()
        scenario.boot_server("foo_image", "foo_flavor", foo="bar")
        scenario._boot_servers.assert_called_once_with(
            "foo_image", "foo_flavor", foo="bar")
| {
"repo_name": "cernops/rally",
"path": "tests/unit/plugins/openstack/scenarios/ec2/test_servers.py",
"copies": "15",
"size": "1232",
"license": "apache-2.0",
"hash": -8110911692364720000,
"line_mean": 35.2352941176,
"line_max": 75,
"alpha_frac": 0.7167207792,
"autogenerated": false,
"ratio": 3.9361022364217253,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.fuel import environments
from tests.unit import test
class FuelEnvironmentsTestCase(test.ScenarioTestCase):
    """Unit tests for the Fuel environments scenarios."""

    def test_create_and_list_environments(self):
        scenario = environments.FuelEnvironments(self.context)
        scenario._create_environment = mock.Mock()
        scenario._list_environments = mock.Mock()

        scenario.create_and_list_environments(
            release_id=2, network_provider="test_neutron",
            deployment_mode="test_mode", net_segment_type="test_type")

        scenario._create_environment.assert_called_once_with(
            release_id=2, network_provider="test_neutron",
            deployment_mode="test_mode", net_segment_type="test_type")
        scenario._list_environments.assert_called_once_with()

    def test_create_and_delete_environments(self):
        # Pass self.context for consistency with the test above; the
        # original instantiated this scenario without a context.
        scenario = environments.FuelEnvironments(self.context)
        scenario._create_environment = mock.Mock(return_value=42)
        scenario._delete_environment = mock.Mock()

        scenario.create_and_delete_environment(
            release_id=2, network_provider="test_neutron",
            deployment_mode="test_mode", net_segment_type="test_type")

        scenario._create_environment.assert_called_once_with(
            release_id=2, network_provider="test_neutron",
            deployment_mode="test_mode", net_segment_type="test_type")
        # 42 is the id returned by _create_environment; 5 is presumably the
        # scenario's deletion retry count -- verify against the scenario.
        scenario._delete_environment.assert_called_once_with(42, 5)
| {
"repo_name": "varuntiwari27/rally",
"path": "tests/unit/plugins/openstack/scenarios/fuel/test_environments.py",
"copies": "15",
"size": "2068",
"license": "apache-2.0",
"hash": 8308210262706891000,
"line_mean": 40.36,
"line_max": 78,
"alpha_frac": 0.6968085106,
"autogenerated": false,
"ratio": 4.046966731898239,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_config.cfg
import oslo_utils.importutils
# Configuration knob letting deployers substitute an alternative Glance
# API wrapper implementation.
_glance_opts = [
    oslo_config.cfg.StrOpt('image_api_class',
                           default='manila.image.glance.API',
                           help='The full class name of the '
                                'Glance API class to use.'),
]
# Registered on the global CONF at import time so the option is available
# before API() is first called.
oslo_config.cfg.CONF.register_opts(_glance_opts)
def API():
    """Instantiate the Glance API class named by CONF.image_api_class."""
    class_path = oslo_config.cfg.CONF.image_api_class
    api_cls = oslo_utils.importutils.import_class(class_path)
    return api_cls()
| {
"repo_name": "openstack/manila",
"path": "manila/image/__init__.py",
"copies": "1",
"size": "1110",
"license": "apache-2.0",
"hash": -5156024766018677000,
"line_mean": 32.6363636364,
"line_max": 75,
"alpha_frac": 0.6837837838,
"autogenerated": false,
"ratio": 3.92226148409894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 33
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n as i18n
# Factory producing translation functions bound to the "octavia" domain.
_translators = i18n.TranslatorFactory(domain='octavia')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info      # info-level log messages
_LW = _translators.log_warning   # warning-level log messages
_LE = _translators.log_error     # error-level log messages
_LC = _translators.log_critical  # critical-level log messages
| {
"repo_name": "jiahaoliang/group-based-policy",
"path": "gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/i18n.py",
"copies": "1",
"size": "1077",
"license": "apache-2.0",
"hash": -6006630586266421000,
"line_mean": 34.9,
"line_max": 78,
"alpha_frac": 0.7298050139,
"autogenerated": false,
"ratio": 3.6508474576271186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4880652471527119,
"avg_score": null,
"num_lines": null
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_service import service
import setproctitle
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
class BaseWorker(service.ServiceBase):
    """Partial implementation of the ServiceBase ABC.

    Subclasses will still need to add the other abstract methods defined in
    service.ServiceBase. See oslo_service for more details.

    If a plugin needs to handle synchronization with the Neutron database and
    do this only once instead of in every API worker, for instance, it would
    define a BaseWorker class and the plugin would have get_workers return
    an array of BaseWorker instances. For example:

    .. code-block:: python

        class MyPlugin(...):
            def get_workers(self):
                return [MyPluginWorker()]

        class MyPluginWorker(BaseWorker):
            def start(self):
                super(MyPluginWorker, self).start()
                do_sync()
    """

    # Fallback class value used when a subclass never calls
    # super().__init__().
    _default_process_count = 1

    def __init__(self, worker_process_count=_default_process_count,
                 set_proctitle='on', desc=None):
        """Initialize a worker instance.

        :param worker_process_count: How many processes to spawn for this
            worker: 0 spawns one new worker thread; 1..N spawn N new
            worker processes.
        :param set_proctitle: 'off' leaves the process title alone; 'on'
            sets a descriptive title plus the parent's title; 'brief' sets
            only the descriptive title.
        :param desc: process descriptive string.
        """
        self._worker_process_count = worker_process_count
        self._my_pid = os.getpid()
        self._set_proctitle = set_proctitle
        # The parent's title is only appended in 'on' mode, so it is only
        # captured in that case.
        if set_proctitle == 'on':
            self._parent_proctitle = setproctitle.getproctitle()
        self.desc = desc

    @property
    def worker_process_count(self):
        """The worker's process count.

        :returns: The number of processes to spawn for this worker.
        """
        return self._worker_process_count

    def setproctitle(self, name="neutron-server", desc=None):
        """Set this process's title, unless disabled or not yet forked."""
        if self._set_proctitle == "off":
            return
        if os.getpid() == self._my_pid:
            # Same pid as at construction time -- we are still the parent
            # process, so leave its title untouched.
            return
        title = "{0}: {1}".format(name, desc or self.__class__.__name__)
        if self._set_proctitle == "on":
            title = "{0} ({1})".format(title, self._parent_proctitle)
        setproctitle.setproctitle(title)

    def start(self, name="neutron-server", desc=None):
        """Start the worker.

        If worker_process_count is greater than 0, a callback notification
        is sent. Subclasses should call this method before doing their
        own start() work.

        Automatically sets the process title to indicate that this is a
        child worker, customizable via the name and desc arguments.

        :returns: None
        """
        # If we are a child process, set our proctitle to something useful.
        self.setproctitle(name, desc or self.desc)
        if self.worker_process_count > 0:
            registry.notify(resources.PROCESS, events.AFTER_INIT, self.start)
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/worker.py",
"copies": "1",
"size": "3933",
"license": "apache-2.0",
"hash": 6729065527950240000,
"line_mean": 34.4324324324,
"line_max": 78,
"alpha_frac": 0.6392067124,
"autogenerated": false,
"ratio": 4.439051918735892,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5578258631135892,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import alembic
from alembic import config as alembic_config
import alembic.migration as alembic_migration
from oslo.db import exception as db_exc
from iot.db.sqlalchemy import api as sqla_api
from iot.db.sqlalchemy import models
def _alembic_config():
    """Build an alembic Config for the alembic.ini beside this module."""
    ini_path = os.path.join(os.path.dirname(__file__), 'alembic.ini')
    return alembic_config.Config(ini_path)
def version(config=None, engine=None):
    """Return the current database revision.

    :param config: unused; kept for signature compatibility.
    :param engine: SQLAlchemy engine; defaults to the global one.
    :returns: Database version
    :rtype: string
    """
    if engine is None:
        engine = sqla_api.get_engine()
    with engine.connect() as conn:
        migration_ctx = alembic_migration.MigrationContext.configure(conn)
        return migration_ctx.get_current_revision()
def upgrade(revision, config=None):
    """Upgrade the database to the given revision.

    :param revision: Desired database revision; falsy values mean 'head'
    :type revision: string
    :param config: alembic Config; defaults to the one beside this module.
    """
    # The original passed ``revision or 'head'`` to the command even though
    # the default had already been applied on the line above; apply it once.
    revision = revision or 'head'
    config = config or _alembic_config()
    # Return the command result for consistency with downgrade()/stamp().
    return alembic.command.upgrade(config, revision)
def create_schema(config=None, engine=None):
    """Create database schema from models description.

    Can be used for initial installation instead of upgrade('head').
    """
    engine = sqla_api.get_engine() if engine is None else engine

    # Refuse to create tables over a database alembic already manages.
    current_rev = version(engine=engine)
    if current_rev is not None:
        raise db_exc.DbMigrationError("DB schema is already under version"
                                      " control. Use upgrade() instead")

    models.Base.metadata.create_all(engine)
    stamp('head', config=config)
def downgrade(revision, config=None):
    """Downgrade the database to the given revision.

    :param revision: Desired database revision; falsy values mean 'base'
    :type revision: string
    """
    target = revision or 'base'
    if config is None:
        config = _alembic_config()
    return alembic.command.downgrade(config, target)
def stamp(revision, config=None):
    """Stamp the database with the provided revision without migrating.

    :param revision: Should match one from repository or head - to stamp
                     database with most recent revision
    :type revision: string
    """
    if config is None:
        config = _alembic_config()
    return alembic.command.stamp(config, revision=revision)
def revision(message=None, autogenerate=False, config=None):
    """Create a template for a new migration.

    :param message: Text that will be used for migration title
    :type message: string
    :param autogenerate: If True - generates diff based on current database
                         state
    :type autogenerate: bool
    """
    if config is None:
        config = _alembic_config()
    return alembic.command.revision(config, message=message,
                                    autogenerate=autogenerate)
| {
"repo_name": "digambar15/openstack-iot",
"path": "iot/db/sqlalchemy/migration.py",
"copies": "1",
"size": "3301",
"license": "apache-2.0",
"hash": 8501516520742854000,
"line_mean": 29.5648148148,
"line_max": 78,
"alpha_frac": 0.6825204483,
"autogenerated": false,
"ratio": 4.178481012658228,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5361001460958228,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import task_runner
if __name__ == '__main__':
    # Positional CLI arguments: the YAML task file plus keystone credentials.
    (yaml_path, auth_url, project, user, password,
     project_domain_id, user_domain_id) = sys.argv[1:]
    sdk_runner = task_runner.SDKRunner(auth_url, project, user, password,
                                       project_domain_id, user_domain_id)
    engine = task_runner.RunnerEngine(yaml_path, sdk_runner)
    error_msg = engine.run_task_sets()
    # A non-empty message doubles as a failing exit status.
    if error_msg:
        sys.exit(error_msg)
| {
"repo_name": "stackforge/tricircle",
"path": "tricircle/tempestplugin/run_yaml_test.py",
"copies": "1",
"size": "1046",
"license": "apache-2.0",
"hash": -736675134300420200,
"line_mean": 35.0689655172,
"line_max": 79,
"alpha_frac": 0.6759082218,
"autogenerated": false,
"ratio": 3.776173285198556,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9952081506998556,
"avg_score": 0,
"num_lines": 29
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
def _get_default_encoding():
    """Return stdin's encoding when known, else the interpreter default."""
    stdin_encoding = sys.stdin.encoding
    if stdin_encoding:
        return stdin_encoding
    return sys.getdefaultencoding()
def safe_decode(text, incoming=None, errors="strict"):
    """Decode *text* to unicode using *incoming*, unless it already is.

    :param text: text/bytes string to decode
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (str, bytes)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, str):
        return text

    codec = incoming or _get_default_encoding()
    try:
        return text.decode(codec, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode("utf-8", errors)
def safe_encode(text, incoming=None, encoding="utf-8", errors="strict"):
    """Encode *text* to bytes using *encoding*.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param text: Incoming text/bytes string
    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded representation of it.
    :raises TypeError: If text is not an instance of str

    See also to_utf8() function which is simpler and don't depend on
    the locale encoding.
    """
    if not isinstance(text, (str, bytes)):
        raise TypeError("%s can't be encoded" % type(text))

    incoming = incoming or _get_default_encoding()
    # Normalize case so "UTF-8" and "utf-8" compare equal below.
    if hasattr(incoming, "lower"):
        incoming = incoming.lower()
    if hasattr(encoding, "lower"):
        encoding = encoding.lower()

    if isinstance(text, str):
        return text.encode(encoding, errors)
    if text and encoding != incoming:
        # Re-encode: decode from the current encoding first.
        return safe_decode(text, incoming, errors).encode(encoding, errors)
    return text
| {
"repo_name": "openstack/rally",
"path": "rally/utils/encodeutils.py",
"copies": "1",
"size": "3532",
"license": "apache-2.0",
"hash": -5537004377910715000,
"line_mean": 36.5744680851,
"line_max": 79,
"alpha_frac": 0.6758210646,
"autogenerated": false,
"ratio": 4.29683698296837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.547265804756837,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from jacket.compute import test
from jacket.compute.virt import event
class TestEvents(test.NoDBTestCase):
    """Unit tests for the virt event classes' string representations."""

    def test_event_repr(self):
        timestamp = time.time()
        instance_uuid = '1234'

        plain_event = event.Event(timestamp)
        self.assertEqual(str(plain_event), "<Event: %s>" % timestamp)

        inst_event = event.InstanceEvent(instance_uuid, timestamp=timestamp)
        self.assertEqual(str(inst_event),
                         "<InstanceEvent: %s, %s>" % (timestamp,
                                                      instance_uuid))

        lc_event = event.LifecycleEvent(instance_uuid,
                                        event.EVENT_LIFECYCLE_RESUMED,
                                        timestamp=timestamp)
        self.assertEqual(str(lc_event),
                         "<LifecycleEvent: %s, %s => Resumed>" %
                         (timestamp, instance_uuid))
| {
"repo_name": "HybridF5/jacket",
"path": "jacket/tests/compute/unit/virt/test_events.py",
"copies": "1",
"size": "1198",
"license": "apache-2.0",
"hash": -5788919400979332000,
"line_mean": 32.2777777778,
"line_max": 75,
"alpha_frac": 0.6719532554,
"autogenerated": false,
"ratio": 3.619335347432024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4791288602832024,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_serialization import jsonutils
import redis
from dragonflow.controller.common import constants
from dragonflow.db import api_nb
from dragonflow.db.drivers import redis_mgt
from dragonflow.db import pub_sub_api
# Module-level logger shared by the pub/sub classes below.
LOG = logging.getLogger(__name__)
class RedisPubSub(pub_sub_api.PubSubApi):
    """Redis-backed pub/sub driver bundling one publisher and subscriber."""

    def __init__(self):
        super(RedisPubSub, self).__init__()
        self.subscriber = RedisSubscriberAgent()
        self.publisher = RedisPublisherAgent()
        self.redis_mgt = None

    def get_subscriber(self):
        """Return the subscriber agent."""
        return self.subscriber

    def get_publisher(self):
        """Return the publisher agent."""
        return self.publisher
class RedisPublisherAgent(pub_sub_api.PublisherAgentBase):
    """Publishes events to a Redis node selected through redis_mgt."""

    # Number of attempts _send_event makes before giving up.
    publish_retry_times = 5

    def __init__(self):
        super(RedisPublisherAgent, self).__init__()
        self.remote = None      # "ip:port" of the currently selected node
        self.client = None      # StrictRedis client bound to that node
        self.redis_mgt = None   # node-management helper, set in initialize()

    def initialize(self):
        # find a publisher server node
        super(RedisPublisherAgent, self).initialize()
        ip, port = api_nb.get_db_ip_port()
        self.redis_mgt = redis_mgt.RedisMgt.get_instance(ip, port)
        self._update_client()

    def close(self):
        # Drop the connection pool and forget the selected node.
        if self.remote:
            self.client.connection_pool.disconnect()
            self.client = None
            self.remote = None

    def _update_client(self):
        # Select a node from the master list and rebuild the Redis client.
        if self.redis_mgt is not None:
            self.remote = self.redis_mgt.pubsub_select_node_idx()
            if self.remote is not None:
                ip_port = self.remote.split(':')
                self.client = redis.client.StrictRedis(host=ip_port[0],
                                                       port=ip_port[1])

    def process_ha(self):
        # None means that publisher connection should be updated.
        # If not None, the publisher connection is still working and is not
        # broken by DB single point failure.
        if self.remote is None:
            self._update_client()

    def _sync_master_list(self):
        # Refresh the master node list from the shared sync string; a
        # truthy result means the list changed, so reselect the node.
        LOG.info("publish connection old masterlist %s",
                 self.redis_mgt.master_list)
        result = self.redis_mgt.redis_get_master_list_from_syncstring(
            redis_mgt.RedisMgt.global_sharedlist.raw)
        LOG.info("publish connection new masterlist %s",
                 self.redis_mgt.master_list)
        if result:
            self._update_client()

    def _send_event(self, data, topic):
        # Retry the publish up to publish_retry_times; on the first failure
        # re-sync the master list once, and when no client is available
        # drop the node and pick a new one before the next attempt.
        ttl = self.publish_retry_times
        alreadysync = False
        while ttl > 0:
            ttl -= 1
            try:
                if self.client is not None:
                    self.client.publish(topic, data)
                    break
            except Exception:
                if not alreadysync:
                    self._sync_master_list()
                    alreadysync = True
                LOG.exception("publish error remote:%(remote)s ",
                              {'remote': self.remote})
                continue
            self.redis_mgt.remove_node_from_master_list(self.remote)
            self._update_client()

    def set_publisher_for_failover(self, pub, callback):
        self.redis_mgt.set_publisher(pub, callback)

    def start_detect_for_failover(self):
        # only start in NB plugin
        if self.redis_mgt is not None:
            self.redis_mgt.daemonize()
        else:
            LOG.warning("redis mgt is none")
class RedisSubscriberAgent(pub_sub_api.SubscriberAgentBase):
    """Listens on Redis pub/sub channels and dispatches received events."""

    def __init__(self):
        super(RedisSubscriberAgent, self).__init__()
        self.remote = None               # "ip:port" of the selected node
        self.client = None               # StrictRedis client for that node
        self.ip = ""
        self.plugin_updates_port = ""
        self.pub_sub = None              # the client's PubSub handle
        self.redis_mgt = None
        self.is_closed = True            # terminates run() when True

    def initialize(self, callback):
        # find a subscriber server node and run daemon
        super(RedisSubscriberAgent, self).initialize(callback)
        ip, port = api_nb.get_db_ip_port()
        self.redis_mgt = redis_mgt.RedisMgt.get_instance(ip, port)
        self._update_client()
        self.is_closed = False

    def process_ha(self):
        # None means that subscriber connection should be updated.
        # If not None, the subscriber connection is still working and is not
        # broken by DB single point failure.
        if self.remote is None:
            self._update_client()

    def _update_client(self):
        # Select a node from the master list and rebuild the client plus
        # its pub/sub handle.
        if self.redis_mgt is not None:
            self.remote = self.redis_mgt.pubsub_select_node_idx()
            if self.remote is not None:
                ip_port = self.remote.split(':')
                self.client = \
                    redis.client.StrictRedis(host=ip_port[0], port=ip_port[1])
                self.ip = ip_port[0]
                self.plugin_updates_port = ip_port[1]
                self.pub_sub = self.client.pubsub()

    def close(self):
        self.redis_mgt = None
        self.pub_sub.close()
        self.pub_sub = None
        self.is_closed = True

    def register_topic(self, topic):
        self.pub_sub.subscribe(topic)

    def unregister_topic(self, topic):
        self.pub_sub.unsubscribe(topic)

    def set_subscriber_for_failover(self, sub, callback):
        self.redis_mgt.set_subscriber(sub, callback)

    def register_hamsg_for_db(self):
        if self.redis_mgt is not None:
            self.redis_mgt.register_ha_topic()
        else:
            LOG.warning("redis mgt is none")

    def run(self):
        # Main listen loop: dispatch user messages, route HA messages on
        # the 'redis' channel to redis_mgt, and reconnect (possibly to a
        # different node) when the connection drops.
        while not self.is_closed:
            time.sleep(0)
            try:
                if self.pub_sub is not None:
                    for data in self.pub_sub.listen():
                        if 'subscribe' == data['type']:
                            continue
                        elif 'unsubscribe' == data['type']:
                            continue
                        elif 'message' == data['type']:
                            # Redis management module publishes node list
                            # on topic 'redis'.
                            # All other topics are for the user.
                            if data['channel'] == 'redis':
                                # redis ha message
                                message = pub_sub_api.unpack_message(
                                    data['data'])
                                value = jsonutils.loads(message['value'])
                                self.redis_mgt.redis_failover_callback(
                                    value)
                            else:
                                self._handle_incoming_event(data['data'])
                        else:
                            LOG.warning("receive unknown message in "
                                        "subscriber %(type)s",
                                        {'type': data['type']})
                else:
                    LOG.warning("pubsub lost connection %(ip)s:"
                                "%(port)s",
                                {'ip': self.ip,
                                 'port': self.plugin_updates_port})
                    time.sleep(1)
            except Exception as e:
                LOG.warning("subscriber listening task lost "
                            "connection "
                            "%(e)s", {'e': e})
                try:
                    # First try to reconnect to the same node.
                    connection = self.pub_sub.connection
                    connection.connect()
                    self.pub_sub.on_connect(connection)
                    # self.db_changes_callback(None, None, 'sync', None, None)
                    # notify restart
                    self.db_changes_callback(None, None,
                                             constants.CONTROLLER_DBRESTART,
                                             False, None)
                except Exception:
                    # Reconnect failed: drop the node and select another.
                    self.redis_mgt.remove_node_from_master_list(self.remote)
                    self._update_client()
                    # if pubsub not none notify restart
                    if self.remote is not None:
                        # to re-subscribe
                        self.register_hamsg_for_db()
                        self.db_changes_callback(
                            None, None, constants.CONTROLLER_DBRESTART, True,
                            None)
                    else:
                        LOG.warning("there is no more db node available")
                    LOG.exception("reconnect error %(ip)s:%(port)s",
                                  {'ip': self.ip,
                                   'port': self.plugin_updates_port})
| {
"repo_name": "openstack/dragonflow",
"path": "dragonflow/db/pubsub_drivers/redis_db_pubsub_driver.py",
"copies": "1",
"size": "9188",
"license": "apache-2.0",
"hash": -5339377367661682000,
"line_mean": 36.3495934959,
"line_max": 78,
"alpha_frac": 0.5218763605,
"autogenerated": false,
"ratio": 4.501714845663891,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 246
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ImagesTagsNegativeTest(base.BaseV2ImageTest):
    """Negative tests for tag operations in the v2 image API."""

    @attr(type=['negative', 'gate'])
    def test_update_tags_for_non_existing_image(self):
        # Adding a tag to an image id that does not exist must yield 404.
        random_tag = data_utils.rand_name('tag-')
        missing_image_id = str(uuid.uuid4())
        self.assertRaises(exceptions.NotFound, self.client.add_image_tag,
                          missing_image_id, random_tag)

    @attr(type=['negative', 'gate'])
    def test_delete_non_existing_tag(self):
        # Deleting a tag that was never set on a real image must yield 404.
        _, body = self.create_image(container_format='bare',
                                    disk_format='raw',
                                    is_public=True,
                                    )
        image_id = body['id']
        self.addCleanup(self.client.delete_image, image_id)
        absent_tag = data_utils.rand_name('non-exist-tag-')
        self.assertRaises(exceptions.NotFound, self.client.delete_image_tag,
                          image_id, absent_tag)
| {
"repo_name": "BeenzSyed/tempest",
"path": "tempest/api/image/v2/test_images_tags_negative.py",
"copies": "2",
"size": "1717",
"license": "apache-2.0",
"hash": -5589552480926593000,
"line_mean": 38.0227272727,
"line_max": 76,
"alpha_frac": 0.6447291788,
"autogenerated": false,
"ratio": 4.127403846153846,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 44
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_policy import policy as oslo_policy
import webob
from nova.api.openstack.compute.legacy_v2.contrib import shelve as shelve_v2
from nova.api.openstack.compute import shelve as shelve_v21
from nova.compute import api as compute_api
from nova import exception
from nova import objects
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel
class ShelvePolicyTestV21(test.NoDBTestCase):
    """Role and lock checks for shelve/unshelve/shelve-offload (v2.1)."""

    plugin = shelve_v21
    prefix = 'os_compute_api:os-shelve'
    offload = 'shelve_offload'

    def setUp(self):
        super(ShelvePolicyTestV21, self).setUp()
        self.controller = self.plugin.ShelveController()
        self.req = fakes.HTTPRequest.blank('')

    def test_shelve_restricted_by_role(self):
        # Admin-only rule: a plain request must be rejected.
        admin_only = {'compute_extension:%sshelve' % self.prefix:
                      'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(admin_only))
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                          self.req, str(uuid.uuid4()), {})

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_shelve_locked_server(self, mock_instance_get):
        # Acting on a locked server maps to HTTP 409 Conflict.
        mock_instance_get.return_value = objects.Instance(
            uuid=uuidsentinel.instance1)
        self.stubs.Set(compute_api.API, 'shelve',
                       fakes.fake_actions_to_locked_server)
        self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
                          self.req, str(uuid.uuid4()), {})

    def test_unshelve_restricted_by_role(self):
        admin_only = {'compute_extension:%sunshelve' % self.prefix:
                      'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(admin_only))
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                          self.req, str(uuid.uuid4()), {})

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_unshelve_locked_server(self, mock_instance_get):
        mock_instance_get.return_value = objects.Instance(
            uuid=uuidsentinel.instance1)
        self.stubs.Set(compute_api.API, 'unshelve',
                       fakes.fake_actions_to_locked_server)
        self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
                          self.req, str(uuid.uuid4()), {})

    def test_shelve_offload_restricted_by_role(self):
        admin_only = {'compute_extension:%s%s' % (self.prefix, self.offload):
                      'role:admin'}
        policy.set_rules(oslo_policy.Rules.from_dict(admin_only))
        self.assertRaises(exception.Forbidden,
                          self.controller._shelve_offload, self.req,
                          str(uuid.uuid4()), {})

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_shelve_offload_locked_server(self, mock_instance_get):
        mock_instance_get.return_value = objects.Instance(
            uuid=uuidsentinel.instance1)
        self.stubs.Set(compute_api.API, 'shelve_offload',
                       fakes.fake_actions_to_locked_server)
        self.assertRaises(webob.exc.HTTPConflict,
                          self.controller._shelve_offload,
                          self.req, str(uuid.uuid4()), {})
class ShelvePolicyTestV2(ShelvePolicyTestV21):
    """Re-run the v2.1 policy checks against the legacy v2 plugin."""

    plugin = shelve_v2
    prefix = ''
    offload = 'shelveOffload'

    def _get_instance_other_project(self):
        # An instance owned by a project other than the request context's.
        ctxt = self.req.environ['nova.context']
        other_project = '%s_unequal' % ctxt.project_id
        return objects.Instance(project_id=other_project)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_shelve_allowed(self, mock_instance_get):
        # Even with open rules, cross-project access stays Forbidden.
        mock_instance_get.return_value = self._get_instance_other_project()
        open_rules = {'compute:get': '',
                      'compute_extension:%sshelve' % self.prefix: ''}
        policy.set_rules(oslo_policy.Rules.from_dict(open_rules))
        self.assertRaises(exception.Forbidden, self.controller._shelve,
                          self.req, str(uuid.uuid4()), {})

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_unshelve_allowed(self, mock_instance_get):
        mock_instance_get.return_value = self._get_instance_other_project()
        open_rules = {'compute:get': '',
                      'compute_extension:%sunshelve' % self.prefix: ''}
        policy.set_rules(oslo_policy.Rules.from_dict(open_rules))
        self.assertRaises(exception.Forbidden, self.controller._unshelve,
                          self.req, str(uuid.uuid4()), {})

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    def test_shelve_offload_allowed(self, mock_instance_get):
        mock_instance_get.return_value = self._get_instance_other_project()
        open_rules = {'compute:get': '',
                      'compute_extension:%s%s' % (self.prefix,
                                                  self.offload): ''}
        policy.set_rules(oslo_policy.Rules.from_dict(open_rules))
        self.assertRaises(exception.Forbidden,
                          self.controller._shelve_offload,
                          self.req,
                          str(uuid.uuid4()), {})
class ShelvePolicyEnforcementV21(test.NoDBTestCase):
    """Verify PolicyNotAuthorized is raised when a shelve rule forbids.

    The three original tests were copy-pastes differing only in the action
    name and rule name; the shared flow lives in _check_policy_failed.
    """

    def setUp(self):
        super(ShelvePolicyEnforcementV21, self).setUp()
        self.controller = shelve_v21.ShelveController()
        self.req = fakes.HTTPRequest.blank('')

    def _check_policy_failed(self, action, rule_name):
        """Deny *rule_name* for this project and assert the action fails.

        :param action: controller action suffix, e.g. 'shelve'; the
                       controller method is '_<action>' and the request
                       body key is '<action>'.
        :param rule_name: policy rule expected in the failure message.
        """
        self.policy.set_rules({rule_name: "project:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            getattr(self.controller, '_%s' % action), self.req,
            fakes.FAKE_UUID, body={action: {}})
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_shelve_policy_failed(self):
        self._check_policy_failed('shelve',
                                  "os_compute_api:os-shelve:shelve")

    def test_shelve_offload_policy_failed(self):
        self._check_policy_failed('shelve_offload',
                                  "os_compute_api:os-shelve:shelve_offload")

    def test_unshelve_policy_failed(self):
        self._check_policy_failed('unshelve',
                                  "os_compute_api:os-shelve:unshelve")
| {
"repo_name": "zhimin711/nova",
"path": "nova/tests/unit/api/openstack/compute/test_shelve.py",
"copies": "1",
"size": "7325",
"license": "apache-2.0",
"hash": 1978921237611982300,
"line_mean": 41.5872093023,
"line_max": 78,
"alpha_frac": 0.6460068259,
"autogenerated": false,
"ratio": 3.7032355915065724,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9842344016616342,
"avg_score": 0.0013796801580460568,
"num_lines": 172
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
class EC2Scenario(scenario.OpenStackScenario):
    """Base class for EC2 scenarios with basic atomic actions."""

    @atomic.action_timer("ec2.list_servers")
    def _list_servers(self):
        """Return the current user's EC2 instances."""
        return self.clients("ec2").get_only_instances()

    @atomic.action_timer("ec2.boot_servers")
    def _boot_servers(self, image_id, flavor_name,
                      instance_num=1, **kwargs):
        """Boot multiple servers and wait until they are "Running".

        :param image_id: ID of the image to be used for server creation
        :param flavor_name: Name of the flavor to be used for server creation
        :param instance_num: Number of instances to boot
        :param kwargs: Other optional parameters to boot servers
        :returns: List of created server objects
        """
        reservation = self.clients("ec2").run_instances(
            image_id=image_id,
            instance_type=flavor_name,
            min_count=instance_num,
            max_count=instance_num,
            **kwargs)
        booted = list(reservation.instances)
        # Give the instances a head start before the first status poll.
        self.sleep_between(CONF.benchmark.ec2_server_boot_prepoll_delay)
        return [
            utils.wait_for(
                server,
                ready_statuses=["RUNNING"],
                update_resource=self._update_resource,
                timeout=CONF.benchmark.ec2_server_boot_timeout,
                check_interval=CONF.benchmark.ec2_server_boot_poll_interval)
            for server in booted
        ]

    def _update_resource(self, resource):
        """Refresh *resource* in place and return it (wait_for callback)."""
        resource.update()
        return resource
| {
"repo_name": "yeming233/rally",
"path": "rally/plugins/openstack/scenarios/ec2/utils.py",
"copies": "1",
"size": "2407",
"license": "apache-2.0",
"hash": 9104565299464126000,
"line_mean": 33.884057971,
"line_max": 77,
"alpha_frac": 0.6705442459,
"autogenerated": false,
"ratio": 4.215411558669002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 69
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from designate.common.policies import base
DEPERCATED_REASON = """
The tsigkey API now supports system scope and default roles.
"""
# NOTE(review): the misspelled name above is deliberately kept; it is a
# module-level attribute other code may import.


def _deprecated_rule(name):
    """Build the Wallaby-deprecated admin-only rule for *name*.

    All five legacy tsigkey rules are identical apart from the name, so
    they are generated here instead of being copy-pasted.
    """
    return policy.DeprecatedRule(
        name=name,
        check_str=base.RULE_ADMIN,
        deprecated_reason=DEPERCATED_REASON,
        deprecated_since=versionutils.deprecated.WALLABY
    )


# Legacy RULE_ADMIN rules superseded by the system-scope defaults below.
deprecated_create_tsigkey = _deprecated_rule("create_tsigkey")
deprecated_find_tsigkeys = _deprecated_rule("find_tsigkeys")
deprecated_get_tsigkey = _deprecated_rule("get_tsigkey")
deprecated_update_tsigkey = _deprecated_rule("update_tsigkey")
deprecated_delete_tsigkey = _deprecated_rule("delete_tsigkey")

rules = [
    policy.DocumentedRuleDefault(
        name="create_tsigkey",
        check_str=base.SYSTEM_ADMIN,
        scope_types=['system'],
        description="Create Tsigkey",
        operations=[
            {
                'path': '/v2/tsigkeys',
                'method': 'POST'
            }
        ],
        deprecated_rule=deprecated_create_tsigkey
    ),
    policy.DocumentedRuleDefault(
        name="find_tsigkeys",
        check_str=base.SYSTEM_READER,
        scope_types=['system'],
        description="List Tsigkeys",
        operations=[
            {
                'path': '/v2/tsigkeys',
                'method': 'GET'
            }
        ],
        deprecated_rule=deprecated_find_tsigkeys
    ),
    policy.DocumentedRuleDefault(
        name="get_tsigkey",
        check_str=base.SYSTEM_READER,
        scope_types=['system'],
        description="Show a Tsigkey",
        operations=[
            # NOTE(review): PATCH listed on a read-only "show" rule looks
            # like a copy/paste slip -- confirm whether it belongs under
            # update_tsigkey instead. Preserved as-is to avoid a behavior
            # change.
            {
                'path': '/v2/tsigkeys/{tsigkey_id}',
                'method': 'PATCH'
            }, {
                'path': '/v2/tsigkeys/{tsigkey_id}',
                'method': 'GET'
            }
        ],
        deprecated_rule=deprecated_get_tsigkey
    ),
    policy.DocumentedRuleDefault(
        name="update_tsigkey",
        check_str=base.SYSTEM_ADMIN,
        scope_types=['system'],
        description="Update Tsigkey",
        operations=[
            {
                'path': '/v2/tsigkeys/{tsigkey_id}',
                'method': 'PATCH'
            }
        ],
        deprecated_rule=deprecated_update_tsigkey
    ),
    policy.DocumentedRuleDefault(
        name="delete_tsigkey",
        check_str=base.SYSTEM_ADMIN,
        scope_types=['system'],
        description="Delete a Tsigkey",
        operations=[
            {
                'path': '/v2/tsigkeys/{tsigkey_id}',
                'method': 'DELETE'
            }
        ],
        deprecated_rule=deprecated_delete_tsigkey
    )
]


def list_rules():
    """Return the tsigkey policy rules for registration."""
    return rules
| {
"repo_name": "openstack/designate",
"path": "designate/common/policies/tsigkey.py",
"copies": "1",
"size": "3815",
"license": "apache-2.0",
"hash": -3160115602031640000,
"line_mean": 28.3461538462,
"line_max": 78,
"alpha_frac": 0.6136304063,
"autogenerated": false,
"ratio": 3.788480635551142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49021110418511415,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark import context
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
from rally import objects
from rally import osclients
LOG = logging.getLogger(__name__)
# NOTE(boris-42): This context should be hidden for now and used only by
# benchmark engine. In future during various refactoring of
# validation system and rally CI testing we will make it public
@context.context(name="existing_users", order=99, hidden=True)
class ExistingUsers(context.Context):
    """This context supports using existing users in Rally.

    It reads deployment information to populate context["users"] and
    context["tenants"], so scenarios see no real difference between the
    "users" and "existing_users" contexts.
    """

    # NOTE(boris-42): We don't need to check config schema because
    #                 this is used only by benchmark engine
    CONFIG_SCHEMA = {}

    def __init__(self, ctx):
        super(ExistingUsers, self).__init__(ctx)
        self.context["users"] = []
        self.context["tenants"] = {}

    @rutils.log_task_wrapper(LOG.info, _("Enter context: `existing_users`"))
    def setup(self):
        for user_cfg in self.config:
            endpoint = objects.Endpoint(**user_cfg)
            keystone = osclients.Clients(endpoint).keystone()
            tenant_id = keystone.tenant_id
            # Record each tenant once, keyed by its id.
            if tenant_id not in self.context["tenants"]:
                self.context["tenants"][tenant_id] = {
                    "id": tenant_id,
                    "name": keystone.tenant_name
                }
            self.context["users"].append({
                "endpoint": endpoint,
                "id": keystone.user_id,
                "tenant_id": tenant_id
            })

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `existing_users`"))
    def cleanup(self):
        """These users are not managed by Rally, so don't touch them."""
| {
"repo_name": "vponomaryov/rally",
"path": "rally/plugins/openstack/context/existing_users.py",
"copies": "1",
"size": "2614",
"license": "apache-2.0",
"hash": -8003209098813009000,
"line_mean": 36.3428571429,
"line_max": 79,
"alpha_frac": 0.6526396327,
"autogenerated": false,
"ratio": 4.09717868338558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5249818316085579,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from manila import context
from manila.image import glance
from manila import test
from manila.tests import utils as test_utils
class FakeGlanceClient(object):
    """Minimal glance-client stand-in exposing a canned ``image`` proxy."""

    class Image(object):
        def list(self, *args, **kwargs):
            # Two fixed records are enough for list() assertions.
            return [{'id': 'id1'}, {'id': 'id2'}]

        def __getattr__(self, item):
            # Any attribute other than list() silently resolves to None.
            return None

    def __init__(self):
        self.image = self.Image()
def get_fake_auth_obj():
    """Build a throwaway class whose ``get_client`` is a Mock."""
    attrs = {'get_client': mock.Mock()}
    return type('FakeAuthObj', (object,), attrs)
class GlanceClientTestCase(test.TestCase):
    """Tests for the module-level glance.glanceclient() factory."""

    @mock.patch('manila.image.glance.AUTH_OBJ', None)
    def test_no_auth_obj(self):
        # With no cached loader, a fresh AuthClientLoader must be built
        # and asked for a client.
        mock_client_loader = self.mock_object(
            glance.client_auth, 'AuthClientLoader')
        fake_context = 'fake_context'
        opts = {
            'glance': {
                'api_microversion': 'foo_api_microversion',
                'region_name': 'foo_region_name'
            }
        }
        with test_utils.create_temp_config_with_opts(opts):
            glance.glanceclient(fake_context)
        mock_client_loader.assert_called_once_with(
            client_class=glance.glance_client.Client,
            cfg_group=glance.GLANCE_GROUP
        )
        mock_client_loader.return_value.get_client.assert_called_once_with(
            fake_context,
            version=opts['glance']['api_microversion'],
            region_name=opts['glance']['region_name']
        )

    @mock.patch('manila.image.glance.AUTH_OBJ', get_fake_auth_obj())
    def test_with_auth_obj(self):
        # An already-cached AUTH_OBJ is used directly, no loader created.
        fake_context = 'fake_context'
        opts = {
            'glance': {
                'api_microversion': 'foo_api_microversion',
                'region_name': 'foo_region_name'
            }
        }
        with test_utils.create_temp_config_with_opts(opts):
            glance.glanceclient(fake_context)
        glance.AUTH_OBJ.get_client.assert_called_once_with(
            fake_context,
            version=opts['glance']['api_microversion'],
            region_name=opts['glance']['region_name']
        )
class GlanceApiTestCase(test.TestCase):
    """Tests for glance.API.image_list proxy selection."""

    def setUp(self):
        super(GlanceApiTestCase, self).setUp()
        self.api = glance.API()
        self.glanceclient = FakeGlanceClient()
        self.ctx = context.get_admin_context()
        self.mock_object(glance, 'glanceclient',
                         mock.Mock(return_value=self.glanceclient))

    def test_image_list_glanceclient_has_no_proxy(self):
        # Without an ``images`` proxy the API falls back to ``glance``.
        expected = ['fake', 'image', 'list']

        class FakeGlanceProxy(object):
            def list(self):
                return expected

        self.glanceclient.glance = FakeGlanceProxy()
        self.assertEqual(expected, self.api.image_list(self.ctx))

    def test_image_list_glanceclient_has_proxy(self):
        # When both proxies exist, ``images`` takes precedence.
        from_images = ['fake', 'image', 'list1']
        from_glance = ['fake', 'image', 'list2']

        class FakeImagesProxy(object):
            def list(self):
                return from_images

        class FakeGlanceProxy(object):
            def list(self):
                return from_glance

        self.glanceclient.images = FakeImagesProxy()
        self.glanceclient.glance = FakeGlanceProxy()
        self.assertEqual(from_images, self.api.image_list(self.ctx))
| {
"repo_name": "openstack/manila",
"path": "manila/tests/image/test_image.py",
"copies": "1",
"size": "3963",
"license": "apache-2.0",
"hash": 5426399547327903000,
"line_mean": 29.7209302326,
"line_max": 78,
"alpha_frac": 0.6076204895,
"autogenerated": false,
"ratio": 3.8701171875,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49777376770000004,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests to Glance.
"""
from glanceclient import client as glance_client
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from manila.common import client_auth
from manila.common.config import core_opts
from manila.db import base
# Config group under which all glance-related options live.
GLANCE_GROUP = 'glance'
# Lazily-created client_auth.AuthClientLoader singleton; populated on the
# first glanceclient() call below.
AUTH_OBJ = None
glance_opts = [
    cfg.StrOpt('api_microversion',
               default='2',
               help='Version of Glance API to be used.'),
    cfg.StrOpt('region_name',
               default='RegionOne',
               help='Region name for connecting to glance.'),
]
CONF = cfg.CONF
CONF.register_opts(core_opts)
CONF.register_opts(glance_opts, GLANCE_GROUP)
# Keystone session/auth options are registered in the same config group so
# the auth loader can find them.
ks_loading.register_session_conf_options(CONF, GLANCE_GROUP)
ks_loading.register_auth_conf_options(CONF, GLANCE_GROUP)
def list_opts():
    """Return the auth loader's config options for the glance group."""
    return client_auth.AuthClientLoader.list_opts(GLANCE_GROUP)
def glanceclient(context):
    """Return a Glance client for *context*.

    The AuthClientLoader is created once and cached in the module-level
    AUTH_OBJ; subsequent calls reuse it.
    """
    global AUTH_OBJ
    if not AUTH_OBJ:
        AUTH_OBJ = client_auth.AuthClientLoader(
            client_class=glance_client.Client, cfg_group=GLANCE_GROUP)
    glance_conf = CONF[GLANCE_GROUP]
    return AUTH_OBJ.get_client(context,
                               version=glance_conf.api_microversion,
                               region_name=glance_conf.region_name)
class API(base.Base):
    """API for interacting with glanceclient."""

    def image_list(self, context):
        """List images, preferring the ``images`` proxy when it exists."""
        client = glanceclient(context)
        proxy = client.images if hasattr(client, 'images') else client.glance
        return proxy.list()
| {
"repo_name": "openstack/manila",
"path": "manila/image/glance.py",
"copies": "1",
"size": "2098",
"license": "apache-2.0",
"hash": 3191799396880021500,
"line_mean": 29.8529411765,
"line_max": 75,
"alpha_frac": 0.6897044805,
"autogenerated": false,
"ratio": 3.835466179159049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from os_brick import exception
from os_brick import executor
from os_brick import initiator
class InitiatorConnector(executor.Executor, metaclass=abc.ABCMeta):
    """Abstract base class for volume initiator connectors.

    Concrete subclasses implement the protocol-specific logic for
    attaching/detaching volumes to the local host; this class fixes the
    interface and stores the common device-scan retry setting.
    """
    # This object can be used on any platform (x86, S390)
    platform = initiator.PLATFORM_ALL
    # This object can be used on any os type (linux, windows)
    os_type = initiator.OS_TYPE_ALL
    def __init__(self, root_helper, driver=None, execute=None,
                 device_scan_attempts=initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT,
                 *args, **kwargs):
        """Store the scan-retry count; the rest goes to executor.Executor.

        :param root_helper: command prefix for running as root
        :param driver: optional driver used to find used LUNs (see
                       set_driver); not stored here
        :param execute: optional custom execute callable
        :param device_scan_attempts: how many times to retry device scans
        """
        super(InitiatorConnector, self).__init__(root_helper, execute=execute,
                                                 *args, **kwargs)
        self.device_scan_attempts = device_scan_attempts
    def set_driver(self, driver):
        """The driver is used to find used LUNs."""
        self.driver = driver
    @staticmethod
    @abc.abstractmethod
    def get_connector_properties(root_helper, *args, **kwargs):
        """The generic connector properties."""
        pass
    @abc.abstractmethod
    def check_valid_device(self, path, run_as_root=True):
        """Test to see if the device path is a real device.

        :param path: The file system path for the device.
        :type path: str
        :param run_as_root: run the tests as root user?
        :type run_as_root: bool
        :returns: bool
        """
        pass
    @abc.abstractmethod
    def connect_volume(self, connection_properties):
        """Connect to a volume.

        The connection_properties describes the information needed by
        the specific protocol to use to make the connection.

        The connection_properties is a dictionary that describes the target
        volume. It varies slightly by protocol type (iscsi, fibre_channel),
        but the structure is usually the same.

        An example for iSCSI:

        {'driver_volume_type': 'iscsi',
         'data': {
             'target_luns': [0, 2],
             'target_iqns': ['iqn.2000-05.com.3pardata:20810002ac00383d',
                             'iqn.2000-05.com.3pardata:21810002ac00383d'],
             'target_discovered': True,
             'encrypted': False,
             'qos_specs': None,
             'target_portals': ['10.52.1.11:3260', '10.52.2.11:3260'],
             'access_mode': 'rw',
        }}

        An example for fibre_channel with single lun:

        {'driver_volume_type': 'fibre_channel',
         'data': {
            'initiator_target_map': {'100010604b010459': ['20210002AC00383D'],
                                     '100010604b01045d': ['20220002AC00383D']},
            'target_discovered': True,
            'encrypted': False,
            'qos_specs': None,
            'target_lun': 1,
            'access_mode': 'rw',
            'target_wwn': [
                '20210002AC00383D',
                '20220002AC00383D',
                ],
         }}

        An example for fibre_channel target_wwns and with different LUNs and
        all host ports mapped to target ports:

        {'driver_volume_type': 'fibre_channel',
         'data': {
            'initiator_target_map': {
                '100010604b010459': ['20210002AC00383D', '20220002AC00383D'],
                '100010604b01045d': ['20210002AC00383D', '20220002AC00383D']
                },
            'target_discovered': True,
            'encrypted': False,
            'qos_specs': None,
            'target_luns': [1, 2],
            'access_mode': 'rw',
            'target_wwns': ['20210002AC00383D', '20220002AC00383D'],
         }}

        For FC the dictionary could also present the enable_wildcard_scan key
        with a boolean value (defaults to True) in case a driver doesn't want
        OS-Brick to use a SCSI scan with wildcards when the FC initiator on
        the host doesn't find any target port.

        This is useful for drivers that know that sysfs gets populated
        whenever there's a connection between the host's HBA and the storage
        array's target ports.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :returns: dict
        """
        pass
    @abc.abstractmethod
    def disconnect_volume(self, connection_properties, device_info,
                          force=False, ignore_errors=False):
        """Disconnect a volume from the local host.

        The connection_properties are the same as from connect_volume.
        The device_info is returned from connect_volume.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        :param device_info: historical difference, but same as connection_props
        :type device_info: dict
        :param force: Whether to forcefully disconnect even if flush fails.
        :type force: bool
        :param ignore_errors: When force is True, this will decide whether to
                              ignore errors or raise an exception once finished
                              the operation. Default is False.
        :type ignore_errors: bool
        """
        pass
    @abc.abstractmethod
    def get_volume_paths(self, connection_properties):
        """Return the list of existing paths for a volume.

        The job of this method is to find out what paths in
        the system are associated with a volume as described
        by the connection_properties.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        """
        pass
    @abc.abstractmethod
    def get_search_path(self):
        """Return the directory where a Connector looks for volumes.

        Some Connectors need the information in the
        connection_properties to determine the search path.
        """
        pass
    @abc.abstractmethod
    def extend_volume(self, connection_properties):
        """Update the attached volume's size.

        This method will attempt to update the local hosts's
        volume after the volume has been extended on the remote
        system. The new volume size in bytes will be returned.
        If there is a failure to update, then None will be returned.

        :param connection_properties: The volume connection properties.
        :returns: new size of the volume.
        """
        pass
    @abc.abstractmethod
    def get_all_available_volumes(self, connection_properties=None):
        """Return all volumes that exist in the search directory.

        At connect_volume time, a Connector looks in a specific
        directory to discover a volume's paths showing up.
        This method's job is to return all paths in the directory
        that connect_volume uses to find a volume.

        This method is used in coordination with get_volume_paths()
        to verify that volumes have gone away after disconnect_volume
        has been called.

        :param connection_properties: The dictionary that describes all
                                      of the target volume attributes.
        :type connection_properties: dict
        """
        pass
    def check_IO_handle_valid(self, handle, data_type, protocol):
        """Check IO handle has correct data type."""
        # A falsy handle (None) is accepted; only a present handle of the
        # wrong type raises.
        if (handle and not isinstance(handle, data_type)):
            raise exception.InvalidIOHandleObject(
                protocol=protocol,
                actual_type=type(handle))
| {
"repo_name": "openstack/os-brick",
"path": "os_brick/initiator/initiator_connector.py",
"copies": "1",
"size": "8262",
"license": "apache-2.0",
"hash": 7836207841179509000,
"line_mean": 36.7260273973,
"line_max": 79,
"alpha_frac": 0.6107480029,
"autogenerated": false,
"ratio": 4.504907306434024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5615655309334024,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import jsonschema
import mock
from rally.plugins.openstack.context.glance import images
from tests.unit import fakes
from tests.unit import test
# Module paths used to build mock.patch targets in the tests below.
CTX = "rally.plugins.openstack.context.glance"
SCN = "rally.plugins.openstack.scenarios.glance"
class ImageGeneratorTestCase(test.ScenarioTestCase):
    """Unit tests for the Glance "images" context generator."""

    def _gen_tenants(self, count):
        # Build ``count`` fake tenants keyed and named by their index.
        tenants = {}
        for id_ in range(count):
            tenants[str(id_)] = {"name": str(id_)}
        return tenants
    def test_init_validation(self):
        # An "images" config missing mandatory fields must fail validation.
        self.context["config"] = {
            "images": {
                "image_url": "mock_url"
            }
        }
        self.assertRaises(jsonschema.ValidationError,
                          images.ImageGenerator.validate, self.context)
    @mock.patch("%s.utils.GlanceScenario._create_image" % SCN,
                return_value=fakes.FakeImage(id="uuid"))
    def test_setup(self, mock_glance_scenario__create_image):
        # setup() should record images_per_tenant "uuid" ids per tenant.
        tenants_count = 2
        users_per_tenant = 5
        images_per_tenant = 5
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants:
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": mock.MagicMock()})
        self.context.update({
            "config": {
                "users": {
                    "tenants": tenants_count,
                    "users_per_tenant": users_per_tenant,
                    "concurrent": 10,
                },
                "images": {
                    "image_url": "mock_url",
                    "image_type": "qcow2",
                    "image_container": "bare",
                    "images_per_tenant": images_per_tenant,
                    "image_name": "some_name",
                    "min_ram": 128,
                    "min_disk": 1,
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        # Expected context: identical, but with each tenant gaining an
        # "images" list of the created ids.
        new_context = copy.deepcopy(self.context)
        for id_ in new_context["tenants"].keys():
            new_context["tenants"][id_].setdefault("images", [])
            for j in range(images_per_tenant):
                new_context["tenants"][id_]["images"].append("uuid")
        images_ctx = images.ImageGenerator(self.context)
        images_ctx.setup()
        self.assertEqual(new_context, self.context)
    @mock.patch("%s.images.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup):
        # cleanup() must delegate to resource_manager.cleanup for all users.
        tenants_count = 2
        users_per_tenant = 5
        images_per_tenant = 5
        tenants = self._gen_tenants(tenants_count)
        users = []
        for id_ in tenants:
            for i in range(users_per_tenant):
                users.append({"id": i, "tenant_id": id_,
                              "endpoint": "endpoint"})
            tenants[id_].setdefault("images", [])
            for j in range(images_per_tenant):
                tenants[id_]["images"].append("uuid")
        self.context.update({
            "config": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 5,
                    "concurrent": 10,
                },
                "images": {
                    "image_url": "mock_url",
                    "image_type": "qcow2",
                    "image_container": "bare",
                    "images_per_tenant": 5,
                    "image_name": "some_name",
                    "min_ram": 128,
                    "min_disk": 1,
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "users": users,
            "tenants": tenants
        })
        images_ctx = images.ImageGenerator(self.context)
        images_ctx.cleanup()
        mock_cleanup.assert_called_once_with(names=["glance.images"],
                                             users=self.context["users"])
| {
"repo_name": "aplanas/rally",
"path": "tests/unit/plugins/openstack/context/glance/test_images.py",
"copies": "9",
"size": "4605",
"license": "apache-2.0",
"hash": -7774013809796684000,
"line_mean": 31.8928571429,
"line_max": 75,
"alpha_frac": 0.5094462541,
"autogenerated": false,
"ratio": 4.263888888888889,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9273335142988889,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import elasticsearch
from oslo_config import cfg
from oslo_log import log
import six
from freezer_api.common import db_mappings
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Defaults used when the CLI options do not override them.
DEFAULT_INDEX = 'freezer'
DEFAULT_REPLICAS = 0
class ElasticSearchManager(object):
    """Manage the ElasticSearch mappings used by freezer-api.

    Sync: create mappings
    Update: update mappings
    remove: delete the mappings
    show: print out all the mappings
    """

    def __init__(self, **options):
        """Connect to the cluster and cache the configuration.

        :param options: elasticsearch client options; must contain 'index'.
        :raises Exception: if the cluster does not answer a ping.
        """
        self.mappings = db_mappings.get_mappings().copy()
        self.conf = options.copy()
        self.index = self.conf['index']
        self.elk = elasticsearch.Elasticsearch(**options)
        # Fail fast when the cluster is unreachable.
        if not self.elk.ping():
            raise Exception('ElasticSearch cluster is not available. '
                            'Cannot ping it')
        # Clearing the index cache is best effort only.
        try:
            self.elk.indices.clear_cache(index=self.conf['index'])
        except Exception as e:
            LOG.warning(e)

    def _check_index_exists(self, index):
        """Return True if *index* exists in the cluster.

        elasticsearch.TransportError propagates to the caller unchanged;
        the previous try/except around this call only re-raised it.
        """
        LOG.info('check if index: {0} exists or not'.format(index))
        return self.elk.indices.exists(index=index)

    def _check_mapping_exists(self, mappings):
        """Return True if doc type *mappings* exists under our index."""
        LOG.info('check if mappings: {0} exists or not'.format(mappings))
        return self.elk.indices.exists_type(index=self.index,
                                            doc_type=mappings)

    def get_required_mappings(self):
        """Return the mappings the user asked to operate on.

        If 'select_mapping' is set, only that mapping is returned;
        otherwise every known mapping is returned.

        :return: dict mapping doc_type -> mapping body
        :raises Exception: if the selected mapping is unknown.
        """
        # .get() keeps working when the option was never supplied
        # (the old code raised KeyError in that case).
        selected = self.conf.get('select_mapping')
        if selected:
            if selected not in self.mappings.keys():
                raise Exception(
                    'Selected mappings {0} does not exists. Please, choose '
                    'one of {1}'.format(selected,
                                        self.mappings.keys()
                                        )
                )
            return {selected: self.mappings.get(selected)}
        return self.mappings

    def db_sync(self):
        """Create or update elasticsearch db mappings.

        steps:
        1) remove the old mappings first if 'erase' was requested
        2) create the index if it does not exist yet
        3) create or update the required mappings one by one
        :return: None
        """
        if self.conf.get('erase'):
            self.remove_mappings()
        if not self._check_index_exists(self.index):
            self._create_index()
        _mappings = self.get_required_mappings()
        # create/update one by one
        for doc_type, body in _mappings.items():
            check = self.create_one_mapping(doc_type, body)
            if check:
                print("Creating or Updating {0} is {1}".format(
                    doc_type, check.get('acknowledged')))
            else:
                # 'check' is falsy (None or empty) on this branch, so do
                # not call .get() on it -- the old code raised
                # AttributeError when the client returned None.
                print("Couldn't update {0}. Request returned {1}".format(
                    doc_type, check))

    def _create_index(self):
        """Create the index that the mappings will live under.

        :return: {u'acknowledged': True} on success or None if the index
            already exists.
        """
        if not self._check_index_exists(index=self.index):
            body = {
                'number_of_replicas':
                    self.conf['number_of_replicas'] or DEFAULT_REPLICAS
            }
            return self.elk.indices.create(index=self.index, body=body)

    def delete_index(self):
        """Delete the whole index (and with it every mapping)."""
        return self.elk.indices.delete(index=self.index)

    def create_one_mapping(self, doc_type, body):
        """Create one document type and update its mappings.

        :param doc_type: the document type to be created jobs, clients,
            backups
        :param body: the structure of the document
        :return: dict
        """
        # Ask before overwriting an existing doc type.
        if self._check_mapping_exists(doc_type):
            do_update = self.prompt(
                '[[[ {0} ]]] already exists in index => {1}'
                ' <= Do you want to update it ? (y/n) '.format(doc_type,
                                                               self.index)
            )
            if not do_update:
                return {'acknowledged': False}
        # Put (create or update) the mappings through the client.
        return self.elk.indices.put_mapping(doc_type=doc_type, body=body,
                                            index=self.index)

    def remove_one_mapping(self, doc_type):
        """Remove one mapping at a time.

        :param doc_type: document type to be removed
        :return: dict
        """
        LOG.info('Removing mapping {0} from index {1}'.format(doc_type,
                                                              self.index))
        # Errors propagate unchanged; the previous try/except only
        # re-raised them.
        return self.elk.indices.delete_mapping(self.index,
                                               doc_type=doc_type)

    def remove_mappings(self):
        """Remove all mappings by dropping the index itself.

        :return: None
        """
        if not self._check_index_exists(index=self.index):
            print("Index {0} doesn't exists.".format(self.index))
            return
        self.delete_index()

    def update_mappings(self):
        """Update mappings without prompting (implies 'yes').

        :return: dict
        """
        self.conf['yes'] = True
        return self.db_sync()

    def show_mappings(self):
        """Return the existing mappings of the index, if any.

        :return: dict, or None when the index does not exist.
        """
        if not self._check_index_exists(index=self.index):
            LOG.debug("Index {0} doesn't exists.".format(self.index))
            return
        return self.elk.indices.get_mapping(index=self.index)

    def update_settings(self):
        """Update the number of replicas of the index.

        :return: dict
        """
        body = {
            'number_of_replicas':
                self.conf['number_of_replicas'] or DEFAULT_REPLICAS
        }
        return self.elk.indices.put_settings(body=body, index=self.index)

    def prompt(self, message):
        """Ask the user to confirm an action.

        :param message: message printed when asking for confirmation
        :return: True or False
        """
        # 'yes' in the config answers every question up front
        # (non-interactive mode); .get() avoids KeyError when unset.
        if self.conf.get('yes'):
            return self.conf['yes']
        while True:
            ans = six.input(message)
            if ans.lower() == 'y':
                return True
            elif ans.lower() == 'n':
                return False
| {
"repo_name": "szaher/freezer-api",
"path": "freezer_api/db/elasticsearch/es_manager.py",
"copies": "1",
"size": "8266",
"license": "apache-2.0",
"hash": 4851870462815226000,
"line_mean": 34.7835497835,
"line_max": 79,
"alpha_frac": 0.5488749093,
"autogenerated": false,
"ratio": 4.509547190398254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5558422099698255,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import itertools
import sys
from mox3 import mox
from oslo_serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.neutron.v2_0 import network
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20NetworkJSON(test_cli20.CLITestV20Base):
    """CLI layer tests for the 'network' resource.

    Each test drives a real cliff command object and stubs the HTTP
    layer with mox, asserting on the exact URL/body the client sends.
    """
    def setUp(self):
        # 'tags' is serialized as repeated 'tag' query parameters.
        super(CLITestV20NetworkJSON, self).setUp(plurals={'tags': 'tag'})
    def test_create_network(self):
        # Create net: myname.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_create_network_with_unicode(self):
        # Create net: u'\u7f51\u7edc'.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = u'\u7f51\u7edc'
        myid = 'myid'
        args = [name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_create_network_description(self):
        # Create net: --description 'Nice network' myname.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--description', 'Nice network', name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   description='Nice network')
    def test_create_network_tenant(self):
        # Create net: --tenant_id tenantid myname.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--tenant_id', 'tenantid', name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')
        # Test dashed options
        args = ['--tenant-id', 'tenantid', name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')
    def test_create_network_provider_args(self):
        # Create net: with --provider arguments.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        # Test --provider attributes before network name
        args = ['--provider:network_type', 'vlan',
                '--provider:physical_network', 'physnet1',
                '--provider:segmentation_id', '400', name]
        position_names = ['provider:network_type',
                          'provider:physical_network',
                          'provider:segmentation_id', 'name']
        position_values = ['vlan', 'physnet1', '400', name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_create_network_tags(self):
        # Create net: myname --tags a b.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--tags', 'a', 'b']
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tags=['a', 'b'])
    def test_create_network_state(self):
        # Create net: --admin_state_down myname.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--admin_state_down', name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)
        # Test dashed options
        args = ['--admin-state-down', name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)
    def test_create_network_vlan_transparent(self):
        # Create net: myname --vlan-transparent True.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--vlan-transparent', 'True', name]
        vlantrans = {'vlan_transparent': 'True'}
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   **vlantrans)
    def test_create_network_with_qos_policy(self):
        # Create net: --qos-policy mypolicy.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        qos_policy_name = 'mypolicy'
        args = [name, '--qos-policy', qos_policy_name]
        position_names = ['name', 'qos_policy_id']
        position_values = [name, qos_policy_name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_create_network_with_az_hint(self):
        # Create net: --availability-zone-hint zone1
        # --availability-zone-hint zone2.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--availability-zone-hint', 'zone1',
                '--availability-zone-hint', 'zone2', name]
        position_names = ['availability_zone_hints', 'name']
        position_values = [['zone1', 'zone2'], name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_create_network_with_dns_domain(self):
        # Create net: --dns-domain my-domain.org.
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        dns_domain_name = 'my-domain.org.'
        args = [name, '--dns-domain', dns_domain_name]
        position_names = ['name', 'dns_domain']
        position_values = [name, dns_domain_name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)
    def test_list_nets_empty_with_column(self):
        # Listing with '-c id' and a filter that matches nothing prints
        # just a newline.
        resources = "networks"
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: []}
        resstr = self.client.serialize(reses)
        # url method body
        query = "id=myfakeid"
        args = ['-c', 'id', '--', '--id', 'myfakeid']
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.MyUrlComparator(test_cli20.end_url(path, query),
                                       self.client),
            'GET',
            body=None,
            headers=mox.ContainsKeyValue(
                'X-Auth-Token',
                test_cli20.TOKEN)).AndReturn(
                    (test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _str = self.fake_stdout.make_string()
        self.assertEqual('\n', _str)
    def _test_list_networks(self, cmd, detail=False, tags=(),
                            fields_1=(), fields_2=(), page_size=None,
                            sort_key=(), sort_dir=(), base_args=None,
                            query=''):
        # Common helper: stub extend_list and delegate to the generic
        # list-resources machinery of the base class.
        resources = "networks"
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_resources(resources, cmd, detail, tags,
                                  fields_1, fields_2, page_size=page_size,
                                  sort_key=sort_key, sort_dir=sort_dir,
                                  base_args=base_args, query=query)
    def test_list_nets_pagination(self):
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_resources_with_pagination("networks", cmd)
    def test_list_nets_sort(self):
        # list nets:
        # --sort-key name --sort-key id --sort-dir asc --sort-dir desc
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name', 'id'],
                                 sort_dir=['asc', 'desc'])
    def test_list_nets_sort_with_keys_more_than_dirs(self):
        # list nets: --sort-key name --sort-key id --sort-dir desc
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name', 'id'],
                                 sort_dir=['desc'])
    def test_list_nets_sort_with_dirs_more_than_keys(self):
        # list nets: --sort-key name --sort-dir desc --sort-dir asc
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name'],
                                 sort_dir=['desc', 'asc'])
    def test_list_nets_limit(self):
        # list nets: -P.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, page_size=1000)
    def test_list_nets_detail(self):
        # list nets: -D.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, True)
    def test_list_nets_tags(self):
        # List nets: -- --tags a b.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, tags=['a', 'b'])
    def test_list_nets_tags_with_unicode(self):
        # List nets: -- --tags u'\u7f51\u7edc'.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])
    def test_list_nets_detail_tags(self):
        # List nets: -D -- --tags a b.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, detail=True, tags=['a', 'b'])
    def _test_list_nets_extend_subnets(self, data, expected):
        # Verify that listing networks also fetches the subnets referenced
        # by each network and renders "id cidr" per subnet.
        def setup_list_stub(resources, data, query):
            reses = {resources: data}
            resstr = self.client.serialize(reses)
            resp = (test_cli20.MyResp(200), resstr)
            path = getattr(self.client, resources + '_path')
            self.client.httpclient.request(
                test_cli20.MyUrlComparator(
                    test_cli20.end_url(path, query), self.client),
                'GET',
                body=None,
                headers=mox.ContainsKeyValue(
                    'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, 'get_client')
        self.mox.StubOutWithMock(self.client.httpclient, 'request')
        cmd.get_client().AndReturn(self.client)
        setup_list_stub('networks', data, '')
        cmd.get_client().AndReturn(self.client)
        filters = ''
        for n in data:
            for s in n['subnets']:
                filters = filters + "&id=%s" % s
        setup_list_stub('subnets',
                        [{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
                         {'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
                         {'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
                        query='fields=id&fields=cidr' + filters)
        self.mox.ReplayAll()
        args = []
        cmd_parser = cmd.get_parser('list_networks')
        parsed_args = cmd_parser.parse_args(args)
        result = cmd.take_action(parsed_args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _result = [x for x in result[1]]
        self.assertEqual(len(expected), len(_result))
        for res, exp in zip(_result, expected):
            self.assertEqual(len(exp), len(res))
            for obsrvd, expctd in zip(res, exp):
                self.assertEqual(expctd, obsrvd)
    def test_list_nets_extend_subnets(self):
        data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
                {'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
                                                             'mysubid3']}]
        # id, name, subnets
        expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
                    ('netid2', 'net2',
                     'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
        self._test_list_nets_extend_subnets(data, expected)
    def test_list_nets_extend_subnets_no_subnet(self):
        data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
                {'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
        # id, name, subnets
        expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
                    ('netid2', 'net2', 'mysubid4 ')]
        self._test_list_nets_extend_subnets(data, expected)
    def test_list_nets_fields(self):
        # List nets: --fields a --fields b -- --fields c d.
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd,
                                 fields_1=['a', 'b'], fields_2=['c', 'd'])
    def _test_list_nets_columns(self, cmd, returned_body,
                                args=('-f', 'json')):
        # Helper for column-selection tests; extend_list is stubbed out
        # so no extra subnet lookup happens.
        resources = 'networks'
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_columns(cmd, resources, returned_body, args=args)
    def test_list_nets_defined_column(self):
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        returned_body = {"networks": [{"name": "buildname3",
                                       "id": "id3",
                                       "tenant_id": "tenant_3",
                                       "subnets": []}]}
        self._test_list_nets_columns(cmd, returned_body,
                                     args=['-f', 'json', '-c', 'id'])
        _str = self.fake_stdout.make_string()
        returned_networks = jsonutils.loads(_str)
        self.assertEqual(1, len(returned_networks))
        net = returned_networks[0]
        self.assertEqual(1, len(net))
        self.assertIn("id", net.keys())
    def test_list_nets_with_default_column(self):
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        returned_body = {"networks": [{"name": "buildname3",
                                       "id": "id3",
                                       "tenant_id": "tenant_3",
                                       "subnets": []}]}
        self._test_list_nets_columns(cmd, returned_body)
        _str = self.fake_stdout.make_string()
        returned_networks = jsonutils.loads(_str)
        self.assertEqual(1, len(returned_networks))
        net = returned_networks[0]
        self.assertEqual(3, len(net))
        self.assertEqual(0, len(set(net) ^ set(cmd.list_columns)))
    def test_list_external_nets_empty_with_column(self):
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: []}
        resstr = self.client.serialize(reses)
        # url method body
        query = "router%3Aexternal=True&id=myfakeid"
        args = ['-c', 'id', '--', '--id', 'myfakeid']
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.MyUrlComparator(
                test_cli20.end_url(path, query), self.client),
            'GET',
            body=None,
            headers=mox.ContainsKeyValue(
                'X-Auth-Token',
                test_cli20.TOKEN)).AndReturn(
                    (test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _str = self.fake_stdout.make_string()
        self.assertEqual('\n', _str)
    def _test_list_external_nets(self, resources, cmd,
                                 detail=False, tags=(),
                                 fields_1=(), fields_2=()):
        # Build the expected query string by hand (fields, the implicit
        # router:external=True filter, tags, verbose flag) and verify the
        # command issues exactly that request.
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: [{'id': 'myid1', },
                             {'id': 'myid2', }, ], }
        resstr = self.client.serialize(reses)
        # url method body
        query = ""
        args = detail and ['-D', ] or []
        if fields_1:
            for field in fields_1:
                args.append('--fields')
                args.append(field)
        if tags:
            args.append('--')
            args.append("--tag")
            for tag in tags:
                args.append(tag)
        if (not tags) and fields_2:
            args.append('--')
        if fields_2:
            args.append("--fields")
            for field in fields_2:
                args.append(field)
        for field in itertools.chain(fields_1, fields_2):
            if query:
                query += "&fields=" + field
            else:
                query = "fields=" + field
        if query:
            query += '&router%3Aexternal=True'
        else:
            query += 'router%3Aexternal=True'
        for tag in tags:
            if query:
                query += "&tag=" + tag
            else:
                query = "tag=" + tag
        if detail:
            query = query and query + '&verbose=True' or 'verbose=True'
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.MyUrlComparator(
                test_cli20.end_url(path, query), self.client),
            'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _str = self.fake_stdout.make_string()
        self.assertIn('myid1', _str)
    def test_list_external_nets_detail(self):
        # list external nets: -D.
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd, True)
    def test_list_external_nets_tags(self):
        # List external nets: -- --tags a b.
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources,
                                      cmd, tags=['a', 'b'])
    def test_list_external_nets_detail_tags(self):
        # List external nets: -D -- --tags a b.
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd,
                                      detail=True, tags=['a', 'b'])
    def test_list_externel_nets_fields(self):
        # List external nets: --fields a --fields b -- --fields c d.
        # NOTE: "externel" is a long-standing typo in the test name; kept
        # because renaming would change the published test id.
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd,
                                      fields_1=['a', 'b'],
                                      fields_2=['c', 'd'])
    def test_update_network_exception(self):
        # Update net: myid.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self.assertRaises(exceptions.CommandError, self._test_update_resource,
                          resource, cmd, 'myid', ['myid'], {})
    def test_update_network(self):
        # Update net: myid --name myname --tags a b.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--tags', 'a', 'b', '--description',
                                    'This network takes the scenic route'],
                                   {'name': 'myname', 'tags': ['a', 'b'],
                                    'description': 'This network takes the '
                                                   'scenic route'})
    def test_update_network_with_unicode(self):
        # Update net: myid --name u'\u7f51\u7edc' --tags a b.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', u'\u7f51\u7edc',
                                    '--tags', 'a', 'b'],
                                   {'name': u'\u7f51\u7edc',
                                    'tags': ['a', 'b'], }
                                   )
    def test_update_network_with_qos_policy(self):
        # Update net: myid --qos-policy mypolicy.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--qos-policy', 'mypolicy'],
                                   {'qos_policy_id': 'mypolicy', })
    def test_update_network_with_no_qos_policy(self):
        # Update net: myid --no-qos-policy.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--no-qos-policy'],
                                   {'qos_policy_id': None, })
    def test_update_network_with_dns_domain(self):
        # Update net: myid --dns-domain my-domain.org.
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--dns-domain', 'my-domain.org.'],
                                   {'dns_domain': 'my-domain.org.', })
    def test_update_network_with_no_dns_domain(self):
        # Update net: myid --no-dns-domain
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--no-dns-domain'],
                                   {'dns_domain': "", })
    def test_show_network(self):
        # Show net: --fields id --fields name myid.
        resource = 'network'
        cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])
    def test_delete_network(self):
        # Delete net: myid.
        resource = 'network'
        cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)
    def test_bulk_delete_network(self):
        # Delete net: myid1 myid2.
        resource = 'network'
        cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
        myid1 = 'myid1'
        myid2 = 'myid2'
        args = [myid1, myid2]
        self._test_delete_resource(resource, cmd, myid1, args, extra_id=myid2)
    def _test_extend_list(self, mox_calls):
        # Feed extend_list() ten fake networks and let 'mox_calls' record
        # the subnet lookups the implementation is expected to make.
        data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
                 'subnets': ['mysubid%d' % i]}
                for i in range(10)]
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        path = getattr(self.client, 'subnets_path')
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        mox_calls(path, data)
        self.mox.ReplayAll()
        known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
        cmd.extend_list(data, known_args)
        self.mox.VerifyAll()
    def _build_test_data(self, data):
        # Derive the subnet id filter string and the canned subnet
        # response for the given networks.
        subnet_ids = []
        response = []
        filters = ""
        for n in data:
            if 'subnets' in n:
                subnet_ids.extend(n['subnets'])
                for subnet_id in n['subnets']:
                    filters = "%s&id=%s" % (filters, subnet_id)
                    response.append({'id': subnet_id,
                                     'cidr': '192.168.0.0/16'})
        resp_str = self.client.serialize({'subnets': response})
        resp = (test_cli20.MyResp(200), resp_str)
        return filters, resp
    def test_extend_list(self):
        def mox_calls(path, data):
            filters, response = self._build_test_data(data)
            self.client.httpclient.request(
                test_cli20.MyUrlComparator(test_cli20.end_url(
                    path, 'fields=id&fields=cidr' + filters), self.client),
                'GET',
                body=None,
                headers=mox.ContainsKeyValue(
                    'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
        self._test_extend_list(mox_calls)
    def test_extend_list_exceed_max_uri_len(self):
        def mox_calls(path, data):
            sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
            filters, response = self._build_test_data(data)
            # 1 char of extra URI len will cause a split in 2 requests
            self.mox.StubOutWithMock(self.client.httpclient,
                                     "_check_uri_length")
            self.client.httpclient._check_uri_length(mox.IgnoreArg()).AndRaise(
                exceptions.RequestURITooLong(excess=1))
            for data in sub_data_lists:
                filters, response = self._build_test_data(data)
                self.client.httpclient._check_uri_length(
                    mox.IgnoreArg()).AndReturn(None)
                self.client.httpclient.request(
                    test_cli20.MyUrlComparator(
                        test_cli20.end_url(
                            path, 'fields=id&fields=cidr%s' % filters),
                        self.client),
                    'GET',
                    body=None,
                    headers=mox.ContainsKeyValue(
                        'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
        self._test_extend_list(mox_calls)
    def test_list_shared_networks(self):
        # list nets : --shared False
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, base_args='--shared False'.split(),
                                 query='shared=False')
| {
"repo_name": "eayunstack/python-neutronclient",
"path": "neutronclient/tests/unit/test_cli20_network.py",
"copies": "1",
"size": "29788",
"license": "apache-2.0",
"hash": -4329214387494925000,
"line_mean": 43.1958456973,
"line_max": 79,
"alpha_frac": 0.5399489727,
"autogenerated": false,
"ratio": 3.822404722186578,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9862353694886578,
"avg_score": 0,
"num_lines": 674
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from sqlalchemy.orm import session as se
from webob import exc
from neutron.db import models_v2
from neutron.objects import ports as port_obj
from neutron.tests.unit.plugins.ml2 import test_plugin
class TestRevisionPlugin(test_plugin.Ml2PluginV2TestCase):
    """Tests for the revision service plugin.

    Verifies that a resource's ``revision_number`` is bumped whenever the
    resource itself or a related sub-resource changes (fixed IPs, security
    group rules, router interfaces/routes, QoS policy bindings, tags), and
    that ``If-Match: revision_number=N`` constraints are enforced on
    updates and deletes.
    """

    # L3 service plugin to load; this one also supports extra routes,
    # which test_router_interface_ops_bump_router exercises.
    l3_plugin = ('neutron.tests.unit.extensions.test_extraroute.'
                 'TestExtraRouteL3NatServicePlugin')
    # ML2 extension drivers enabled for these tests.
    _extension_drivers = ['qos']

    def get_additional_service_plugins(self):
        """Load the revision plugin under test plus the qos/tag plugins."""
        p = super(TestRevisionPlugin, self).get_additional_service_plugins()
        p.update({'revision_plugin_name': 'revisions',
                  'qos_plugin_name': 'qos',
                  'tag_name': 'tag'})
        return p

    def setUp(self):
        # Extension drivers must be overridden before the ML2 plugin is
        # instantiated by the parent setUp().
        cfg.CONF.set_override('extension_drivers',
                              self._extension_drivers,
                              group='ml2')
        super(TestRevisionPlugin, self).setUp()
        self.cp = directory.get_plugin()
        self.l3p = directory.get_plugin(constants.L3)
        self._ctx = nctx.get_admin_context()

    @property
    def ctx(self):
        # TODO(kevinbenton): return ctx without expire_all after switch to
        # enginefacade complete. We expire_all here because the switch to
        # the new engine facade is resulting in changes being spread over
        # other sessions so we can end up getting stale reads in the parent
        # session if objects remain in the identity map.
        if not self._ctx.session.is_active:
            self._ctx.session.expire_all()
        return self._ctx

    def test_handle_expired_object(self):
        """Revision bumping must tolerate a concurrently deleted parent."""
        rp = directory.get_plugin('revision_plugin')
        with self.port():
            with self.ctx.session.begin():
                ipal_objs = port_obj.IPAllocation.get_objects(self.ctx)
                if not ipal_objs:
                    raise Exception("No IP allocations available.")
                ipal_obj = ipal_objs[0]
                # load port into our session
                port = self.ctx.session.query(models_v2.Port).one()
                # simulate concurrent delete in another session
                other_ctx = nctx.get_admin_context()
                other_ctx.session.delete(
                    other_ctx.session.query(models_v2.Port).first()
                )
                # expire the port so the revision bumping code will trigger a
                # lookup on its attributes and encounter an ObjectDeletedError
                self.ctx.session.expire(port)
                rp._bump_related_revisions(self.ctx.session, ipal_obj)

    def test_port_name_update_revises(self):
        """A plain attribute update on a port bumps its revision."""
        with self.port() as port:
            rev = port['port']['revision_number']
            new = {'port': {'name': 'seaweed'}}
            response = self._update('ports', port['port']['id'], new)
            new_rev = response['port']['revision_number']
            self.assertGreater(new_rev, rev)

    def test_constrained_port_update(self):
        """Port updates with a stale If-Match revision are rejected."""
        with self.port() as port:
            rev = port['port']['revision_number']
            new = {'port': {'name': 'nigiri'}}
            for val in (rev - 1, rev + 1):
                # make sure off-by ones are rejected
                self._update('ports', port['port']['id'], new,
                             headers={'If-Match': 'revision_number=%s' % val},
                             expected_code=exc.HTTPPreconditionFailed.code)
                after_attempt = self._show('ports', port['port']['id'])
                self.assertEqual(rev, after_attempt['port']['revision_number'])
                self.assertEqual(port['port']['name'],
                                 after_attempt['port']['name'])
            # correct revision should work
            self._update('ports', port['port']['id'], new,
                         headers={'If-Match': 'revision_number=%s' % rev})

    def test_constrained_port_delete(self):
        """Port deletes with a stale If-Match revision are rejected."""
        with self.port() as port:
            rev = port['port']['revision_number']
            for val in (rev - 1, rev + 1):
                # make sure off-by ones are rejected
                self._delete('ports', port['port']['id'],
                             headers={'If-Match': 'revision_number=%s' % val},
                             expected_code=exc.HTTPPreconditionFailed.code)
            # correct revision should work
            self._delete('ports', port['port']['id'],
                         headers={'If-Match': 'revision_number=%s' % rev})

    def test_constrained_port_update_handles_db_retries(self):
        # here we ensure all of the constraint handling logic persists
        # on retriable failures to commit caused by races with another
        # update
        with self.port() as port:
            rev = port['port']['revision_number']
            new = {'port': {'name': 'nigiri'}}

            def concurrent_increment(s):
                # One-shot hook: detach itself, then simulate another
                # writer bumping the revision before forcing a retry.
                db_api.sqla_remove(se.Session, 'before_commit',
                                   concurrent_increment)
                # slip in a concurrent update that will bump the revision
                plugin = directory.get_plugin()
                plugin.update_port(nctx.get_admin_context(),
                                   port['port']['id'], new)
                raise db_exc.DBDeadlock()
            db_api.sqla_listen(se.Session, 'before_commit',
                               concurrent_increment)
            # The retried request must still see the If-Match constraint
            # violated by the concurrent update above.
            self._update('ports', port['port']['id'], new,
                         headers={'If-Match': 'revision_number=%s' % rev},
                         expected_code=exc.HTTPPreconditionFailed.code)

    def test_port_ip_update_revises(self):
        """Adding/removing a fixed IP bumps the owning port's revision."""
        with self.port() as port:
            rev = port['port']['revision_number']
            new = {'port': {'fixed_ips': port['port']['fixed_ips']}}
            # ensure adding an IP allocation updates the port
            next_ip = str(netaddr.IPAddress(
                new['port']['fixed_ips'][0]['ip_address']) + 1)
            new['port']['fixed_ips'].append({'ip_address': next_ip})
            response = self._update('ports', port['port']['id'], new)
            self.assertEqual(2, len(response['port']['fixed_ips']))
            new_rev = response['port']['revision_number']
            self.assertGreater(new_rev, rev)
            # ensure deleting an IP allocation updates the port
            rev = new_rev
            new['port']['fixed_ips'].pop()
            response = self._update('ports', port['port']['id'], new)
            self.assertEqual(1, len(response['port']['fixed_ips']))
            new_rev = response['port']['revision_number']
            self.assertGreater(new_rev, rev)

    def test_security_group_rule_ops_bump_security_group(self):
        """Rule create/delete bumps the parent security group's revision."""
        s = {'security_group': {'tenant_id': 'some_tenant', 'name': '',
                                'description': 's'}}
        sg = self.cp.create_security_group(self.ctx, s)
        s['security_group']['name'] = 'hello'
        updated = self.cp.update_security_group(self.ctx, sg['id'], s)
        self.assertGreater(updated['revision_number'], sg['revision_number'])
        # ensure rule changes bump parent SG
        r = {'security_group_rule': {'tenant_id': 'some_tenant',
                                     'port_range_min': 80, 'protocol': 6,
                                     'port_range_max': 90,
                                     'remote_ip_prefix': '0.0.0.0/0',
                                     'ethertype': 'IPv4',
                                     'remote_group_id': None,
                                     'direction': 'ingress',
                                     'security_group_id': sg['id']}}
        rule = self.cp.create_security_group_rule(self.ctx, r)
        sg = updated
        updated = self.cp.get_security_group(self.ctx, sg['id'])
        self.assertGreater(updated['revision_number'], sg['revision_number'])
        self.cp.delete_security_group_rule(self.ctx, rule['id'])
        sg = updated
        updated = self.cp.get_security_group(self.ctx, sg['id'])
        self.assertGreater(updated['revision_number'], sg['revision_number'])

    def test_router_interface_ops_bump_router(self):
        """Interface and route changes bump the owning router's revision."""
        r = {'router': {'name': 'myrouter', 'tenant_id': 'some_tenant',
                        'admin_state_up': True}}
        router = self.l3p.create_router(self.ctx, r)
        r['router']['name'] = 'yourrouter'
        updated = self.l3p.update_router(self.ctx, router['id'], r)
        self.assertGreater(updated['revision_number'],
                           router['revision_number'])
        # add an intf and make sure it bumps rev
        with self.subnet(tenant_id='some_tenant', cidr='10.0.1.0/24') as s:
            interface_info = {'subnet_id': s['subnet']['id']}
            self.l3p.add_router_interface(self.ctx, router['id'],
                                          interface_info)
            router = updated
            updated = self.l3p.get_router(self.ctx, router['id'])
            self.assertGreater(updated['revision_number'],
                               router['revision_number'])
            # Add a route and make sure it bumps revision number
            router = updated
            body = {'router': {'routes': [{'destination': '192.168.2.0/24',
                                           'nexthop': '10.0.1.3'}]}}
            self.l3p.update_router(self.ctx, router['id'], body)
            updated = self.l3p.get_router(self.ctx, router['id'])
            self.assertGreater(updated['revision_number'],
                               router['revision_number'])
            # Removing the route bumps the revision again.
            router = updated
            body['router']['routes'] = []
            self.l3p.update_router(self.ctx, router['id'], body)
            updated = self.l3p.get_router(self.ctx, router['id'])
            self.assertGreater(updated['revision_number'],
                               router['revision_number'])
            self.l3p.remove_router_interface(self.ctx, router['id'],
                                             interface_info)
            router = updated
            updated = self.l3p.get_router(self.ctx, router['id'])
            self.assertGreater(updated['revision_number'],
                               router['revision_number'])

    def test_qos_policy_bump_port_revision(self):
        """Attaching a QoS policy to a port bumps the port's revision."""
        with self.port() as port:
            rev = port['port']['revision_number']
            qos_plugin = directory.get_plugin('QOS')
            qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
                                     'name': "policy1",
                                     'project_id': uuidutils.generate_uuid()}}
            qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
            data = {'port': {'qos_policy_id': qos_obj['id']}}
            response = self._update('ports', port['port']['id'], data)
            new_rev = response['port']['revision_number']
            self.assertGreater(new_rev, rev)

    def test_qos_policy_bump_network_revision(self):
        """Attaching a QoS policy to a network bumps its revision."""
        with self.network() as network:
            rev = network['network']['revision_number']
            qos_plugin = directory.get_plugin('QOS')
            qos_policy = {'policy': {'id': uuidutils.generate_uuid(),
                                     'name': "policy1",
                                     'project_id': uuidutils.generate_uuid()}}
            qos_obj = qos_plugin.create_policy(self.ctx, qos_policy)
            data = {'network': {'qos_policy_id': qos_obj['id']}}
            response = self._update('networks', network['network']['id'], data)
            new_rev = response['network']['revision_number']
            self.assertGreater(new_rev, rev)

    def test_net_tag_bumps_net_revision(self):
        """Tag add/remove bumps the tagged network's revision."""
        with self.network() as network:
            rev = network['network']['revision_number']
            tag_plugin = directory.get_plugin('TAG')
            tag_plugin.update_tag(self.ctx, 'networks',
                                  network['network']['id'], 'mytag')
            updated = directory.get_plugin().get_network(
                self.ctx, network['network']['id'])
            self.assertGreater(updated['revision_number'], rev)
            tag_plugin.delete_tag(self.ctx, 'networks',
                                  network['network']['id'], 'mytag')
            rev = updated['revision_number']
            updated = directory.get_plugin().get_network(
                self.ctx, network['network']['id'])
            self.assertGreater(updated['revision_number'], rev)
| {
"repo_name": "noironetworks/neutron",
"path": "neutron/tests/unit/services/revisions/test_revision_plugin.py",
"copies": "2",
"size": "13243",
"license": "apache-2.0",
"hash": -8037806034904401000,
"line_mean": 48.4141791045,
"line_max": 79,
"alpha_frac": 0.5516121725,
"autogenerated": false,
"ratio": 4.198795180722891,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 268
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import time
import six
from rally.common.i18n import _
from rally import osclients
from rally.plugins.openstack import scenario
from rally.task import atomic
class FuelEnvManager(object):
    """Manager for Fuel environments on top of a fuelclient client.

    fuelclient reports failures either by raising arbitrary exceptions or
    by calling sys.exit (SystemExit); this wrapper converts those into
    ``None`` results or RuntimeError with a readable message.
    """

    def __init__(self, client):
        self.client = client

    def get(self, env_id):
        """Return the environment with id *env_id*, or None on any failure."""
        try:
            env = self.client.get_by_id(env_id)
        except BaseException:
            env = None
        return env

    def list(self):
        """List Fuel environments."""
        try:
            envs = self.client.get_all()
        except SystemExit:
            msg = _("Can't list environments. "
                    "Please check server availability.")
            raise RuntimeError(msg)
        return envs

    def create(self, name, release_id=1,
               network_provider="neutron",
               deployment_mode="ha_compact",
               net_segment_type="vlan"):
        """Create and return an environment named *name*.

        Raises RuntimeError when creation fails or the server does not
        return the new environment.
        """
        try:
            env = self.client.create(name, release_id, network_provider,
                                     deployment_mode, net_segment_type)
        except SystemExit:
            msg = _("Something went wrong while creating an "
                    "environment. This can happen when "
                    "environment with name %s already exists.") % name
            raise RuntimeError(msg)
        if not env:
            raise RuntimeError(_("Environment was not created or was "
                                 "created but not returned by server."))
        return env

    def delete(self, env_id, retries=5, retry_pause=0.5):
        """Delete environment *env_id*, retrying until it disappears.

        Raises RuntimeError once more than *retries* attempts have been
        made and the environment still exists.
        """
        env = self.get(env_id)
        attempt = 0
        while env:
            if attempt > retries:
                raise RuntimeError(_("Can't delete environment "
                                     "id: %s ") % env_id)
            try:
                self.client.delete_by_id(env_id)
            except BaseException:
                # fuelclient may raise/exit on a busy server; back off
                # briefly before checking whether the env is gone.
                time.sleep(retry_pause)
            env = self.get(env_id)
            attempt += 1
class FuelClient(object):
    """Thin facade over `fuelclient.get_client'."""

    def __init__(self, version, server_address, server_port, username,
                 password):
        # NOTE(amaretskiy): fuelclient can only be configured via a
        # configuration file or environment variables. Env variables are
        # preferable here because creating a separate config file for
        # each benchmark is inconvenient.
        os.environ.update({
            "SERVER_ADDRESS": server_address,
            "LISTEN_PORT": str(server_port),
            "KEYSTONE_USER": username,
            "KEYSTONE_PASS": password,
        })
        # Imported lazily so the env vars above take effect first.
        import fuelclient
        FuelClient.fuelclient_module = fuelclient
        factory = fuelclient.get_client
        self.environment = FuelEnvManager(
            factory("environment", version=version))
        self.node = factory("node", version=version)
        self.task = factory("task", version=version)
@osclients.configure("fuel", default_version="v1")
class Fuel(osclients.OSClient):
    """FuelClient factory for osclients.Clients."""

    def create_client(self, *args, **kwargs):
        """Build a FuelClient from this client's credential."""
        credential = self.credential
        parsed = six.moves.urllib.parse.urlparse(credential.auth_url)
        # Fuel's API endpoint reuses the auth host but always listens
        # on port 8000, regardless of the auth_url's port.
        return FuelClient(version=self.choose_version(),
                          server_address=parsed.hostname,
                          server_port=8000,
                          username=credential.username,
                          password=credential.password)
class FuelScenario(scenario.OpenStackScenario):
    """Base class for Fuel scenarios."""

    @atomic.action_timer("fuel.list_environments")
    def _list_environments(self):
        """Return the list of Fuel environments."""
        return self.admin_clients("fuel").environment.list()

    @atomic.action_timer("fuel.create_environment")
    def _create_environment(self, release_id=1,
                            network_provider="neutron",
                            deployment_mode="ha_compact",
                            net_segment_type="vlan"):
        """Create a randomly named environment and return its id."""
        name = self.generate_random_name()
        env = self.admin_clients("fuel").environment.create(
            name, release_id, network_provider, deployment_mode,
            net_segment_type)
        return env["id"]

    @atomic.action_timer("fuel.delete_environment")
    def _delete_environment(self, env_id, retries=5):
        """Delete environment *env_id*, retrying up to *retries* times."""
        self.admin_clients("fuel").environment.delete(env_id, retries)

    @atomic.action_timer("fuel.add_node")
    def _add_node(self, env_id, node_ids, node_roles=None):
        """Add node to environment

        :param env_id: environment id
        :param node_ids: list of node ids
        :param node_roles: list of roles
        """
        node_roles = node_roles or ["compute"]
        try:
            self.admin_clients("fuel").environment.client.add_nodes(
                env_id, node_ids, node_roles)
        except BaseException as e:
            raise RuntimeError(
                "Unable to add node(s) to environment. Fuel client exited "
                "with error %s" % e)

    @atomic.action_timer("fuel.delete_node")
    def _remove_node(self, env_id, node_id):
        """Unassign node *node_id* from environment *env_id*."""
        env = FuelClient.fuelclient_module.objects.environment.Environment(
            env_id)
        try:
            env.unassign([node_id])
        except BaseException as e:
            # Fixed copy-pasted message: this path removes a node, it
            # does not add one.
            raise RuntimeError(
                "Unable to remove node from environment. Fuel client exited "
                "with error %s" % e)

    @atomic.action_timer("fuel.list_nodes")
    def _list_node_ids(self, env_id=None):
        """Return ids of nodes, optionally restricted to *env_id*."""
        result = self.admin_clients("fuel").node.get_all(
            environment_id=env_id)
        return [x["id"] for x in result]

    def _node_is_assigned(self, node_id):
        """Return True if node *node_id* belongs to some environment."""
        try:
            node = self.admin_clients("fuel").node.get_by_id(node_id)
            return bool(node["cluster"])
        except BaseException as e:
            # Fixed copy-pasted message: this path only reads node info.
            raise RuntimeError(
                "Unable to fetch node info. Fuel client exited "
                "with error %s" % e)

    def _get_free_node_id(self):
        """Return the id of a random unassigned node.

        Raises RuntimeError when every node is already assigned.
        """
        node_ids = self._list_node_ids()
        # Shuffle so concurrent scenario runs don't all grab the same node.
        random.shuffle(node_ids)
        for node_id in node_ids:
            if not self._node_is_assigned(node_id):
                return node_id
        else:
            raise RuntimeError("Cannot find a free node.")
| {
"repo_name": "varuntiwari27/rally",
"path": "rally/plugins/openstack/scenarios/fuel/utils.py",
"copies": "4",
"size": "6978",
"license": "apache-2.0",
"hash": 137909582732891580,
"line_mean": 34.7846153846,
"line_max": 78,
"alpha_frac": 0.5815419891,
"autogenerated": false,
"ratio": 4.183453237410072,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6764995226510072,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import mox
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.neutron.v2_0 import network
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20NetworkJSON(test_cli20.CLITestV20Base):
    """CLI tests for network commands using the JSON request format.

    Uses the mox record/replay pattern: expected HTTP requests are
    recorded on stubbed clients, ReplayAll() switches to replay mode,
    the command runs, and VerifyAll() asserts every expectation fired.
    """

    def setUp(self):
        # 'tags' has the irregular singular 'tag' in request bodies.
        super(CLITestV20NetworkJSON, self).setUp(plurals={'tags': 'tag'})

    def test_create_network(self):
        """Create net: myname."""
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_network_with_unicode(self):
        """Create net: u'\u7f51\u7edc'."""
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = u'\u7f51\u7edc'
        myid = 'myid'
        args = [name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values)

    def test_create_network_tenant(self):
        """Create net: --tenant_id tenantid myname."""
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--tenant_id', 'tenantid', name]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')
        # Test dashed options
        args = ['--tenant-id', 'tenantid', name]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tenant_id='tenantid')

    def test_create_network_tags(self):
        """Create net: myname --tags a b."""
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = [name, '--tags', 'a', 'b']
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   tags=['a', 'b'])

    def test_create_network_state(self):
        """Create net: --admin_state_down myname."""
        resource = 'network'
        cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
        name = 'myname'
        myid = 'myid'
        args = ['--admin_state_down', name, ]
        position_names = ['name', ]
        position_values = [name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)
        # Test dashed options
        args = ['--admin-state-down', name, ]
        self._test_create_resource(resource, cmd, name, myid, args,
                                   position_names, position_values,
                                   admin_state_up=False)

    def test_list_nets_empty_with_column(self):
        """List nets with -c id when the server returns no networks."""
        resources = "networks"
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: []}
        resstr = self.client.serialize(reses)
        # url method body
        query = "id=myfakeid"
        args = ['-c', 'id', '--', '--id', 'myfakeid']
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.end_url(path, query), 'GET',
            body=None,
            headers=mox.ContainsKeyValue(
                'X-Auth-Token',
                test_cli20.TOKEN)).AndReturn(
                    (test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        # An empty listing prints just the trailing newline.
        _str = self.fake_stdout.make_string()
        self.assertEqual('\n', _str)

    # NOTE(review): mutable default arguments ([]) are shared across
    # calls; harmless here because they are never mutated.
    def _test_list_networks(self, cmd, detail=False, tags=[],
                            fields_1=[], fields_2=[], page_size=None,
                            sort_key=[], sort_dir=[]):
        """Run a list-networks test with extend_list stubbed out."""
        resources = "networks"
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_resources(resources, cmd, detail, tags,
                                  fields_1, fields_2, page_size=page_size,
                                  sort_key=sort_key, sort_dir=sort_dir)

    def test_list_nets_pagination(self):
        """List nets driven through the pagination helper."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_resources_with_pagination("networks", cmd)

    def test_list_nets_sort(self):
        """list nets: --sort-key name --sort-key id --sort-dir asc
        --sort-dir desc
        """
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name', 'id'],
                                 sort_dir=['asc', 'desc'])

    def test_list_nets_sort_with_keys_more_than_dirs(self):
        """list nets: --sort-key name --sort-key id --sort-dir desc
        """
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name', 'id'],
                                 sort_dir=['desc'])

    def test_list_nets_sort_with_dirs_more_than_keys(self):
        """list nets: --sort-key name --sort-dir desc --sort-dir asc
        """
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, sort_key=['name'],
                                 sort_dir=['desc', 'asc'])

    def test_list_nets_limit(self):
        """list nets: -P."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, page_size=1000)

    def test_list_nets_detail(self):
        """list nets: -D."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, True)

    def test_list_nets_tags(self):
        """List nets: -- --tags a b."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, tags=['a', 'b'])

    def test_list_nets_tags_with_unicode(self):
        """List nets: -- --tags u'\u7f51\u7edc'."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])

    def test_list_nets_detail_tags(self):
        """List nets: -D -- --tags a b."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd, detail=True, tags=['a', 'b'])

    def _test_list_nets_extend_subnets(self, data, expected):
        """List nets and check subnet CIDRs are folded into the output.

        *data* is the networks listing the server returns; *expected* is
        the sequence of (id, name, subnets) tuples the command should
        yield after extend_list merges in the subnet details.
        """
        def setup_list_stub(resources, data, query):
            # Record one expected GET on the given resource collection.
            reses = {resources: data}
            resstr = self.client.serialize(reses)
            resp = (test_cli20.MyResp(200), resstr)
            path = getattr(self.client, resources + '_path')
            self.client.httpclient.request(
                test_cli20.end_url(path, query), 'GET',
                body=None,
                headers=mox.ContainsKeyValue(
                    'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)

        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, 'get_client')
        self.mox.StubOutWithMock(self.client.httpclient, 'request')
        cmd.get_client().AndReturn(self.client)
        setup_list_stub('networks', data, '')
        cmd.get_client().AndReturn(self.client)
        # extend_list issues a second GET on subnets filtered by the ids
        # referenced from the networks above.
        filters = ''
        for n in data:
            for s in n['subnets']:
                filters = filters + "&id=%s" % s
        setup_list_stub('subnets',
                        [{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
                         {'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
                         {'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
                        query='fields=id&fields=cidr' + filters)
        self.mox.ReplayAll()
        args = []
        cmd_parser = cmd.get_parser('list_networks')
        parsed_args = cmd_parser.parse_args(args)
        result = cmd.get_data(parsed_args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        # get_data returns (columns, row_iterator); compare rows cell by
        # cell against the expected tuples.
        _result = [x for x in result[1]]
        self.assertEqual(len(_result), len(expected))
        for res, exp in zip(_result, expected):
            self.assertEqual(len(res), len(exp))
            for a, b in zip(res, exp):
                self.assertEqual(a, b)

    def test_list_nets_extend_subnets(self):
        """Subnet ids in the listing are expanded to 'id cidr' strings."""
        data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
                {'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
                                                             'mysubid3']}]
        # id, name, subnets
        expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
                    ('netid2', 'net2',
                     'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
        self._test_list_nets_extend_subnets(data, expected)

    def test_list_nets_extend_subnets_no_subnet(self):
        """An unknown subnet id is rendered with an empty CIDR."""
        data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
                {'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
        # id, name, subnets
        expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
                    ('netid2', 'net2', 'mysubid4 ')]
        self._test_list_nets_extend_subnets(data, expected)

    def test_list_nets_fields(self):
        """List nets: --fields a --fields b -- --fields c d."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_networks(cmd,
                                 fields_1=['a', 'b'], fields_2=['c', 'd'])

    # NOTE(review): mutable default argument; harmless here since the
    # list is never mutated by this helper.
    def _test_list_nets_columns(self, cmd, returned_body,
                                args=['-f', 'json']):
        """Run a column-selection listing with extend_list stubbed out."""
        resources = 'networks'
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        self._test_list_columns(cmd, resources, returned_body, args=args)

    def test_list_nets_defined_column(self):
        """-c id restricts the JSON output to the id column only."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        returned_body = {"networks": [{"name": "buildname3",
                                       "id": "id3",
                                       "tenant_id": "tenant_3",
                                       "subnets": []}]}
        self._test_list_nets_columns(cmd, returned_body,
                                     args=['-f', 'json', '-c', 'id'])
        _str = self.fake_stdout.make_string()
        returned_networks = utils.loads(_str)
        self.assertEqual(1, len(returned_networks))
        net = returned_networks[0]
        self.assertEqual(1, len(net))
        # NOTE(review): dict.keys()[0] only works on Python 2.
        self.assertEqual("id", net.keys()[0])

    def test_list_nets_with_default_column(self):
        """Without -c, output carries exactly the command's list_columns."""
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        returned_body = {"networks": [{"name": "buildname3",
                                       "id": "id3",
                                       "tenant_id": "tenant_3",
                                       "subnets": []}]}
        self._test_list_nets_columns(cmd, returned_body)
        _str = self.fake_stdout.make_string()
        returned_networks = utils.loads(_str)
        self.assertEqual(1, len(returned_networks))
        net = returned_networks[0]
        self.assertEqual(3, len(net))
        # Symmetric difference empty => columns match exactly.
        self.assertEqual(0, len(set(net) ^ set(cmd.list_columns)))

    def test_list_external_nets_empty_with_column(self):
        """List external nets with -c id when nothing is returned."""
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: []}
        resstr = self.client.serialize(reses)
        # url method body
        # router:external=True is added automatically by the command.
        query = "router%3Aexternal=True&id=myfakeid"
        args = ['-c', 'id', '--', '--id', 'myfakeid']
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.end_url(path, query), 'GET',
            body=None,
            headers=mox.ContainsKeyValue(
                'X-Auth-Token',
                test_cli20.TOKEN)).AndReturn(
                    (test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _str = self.fake_stdout.make_string()
        self.assertEqual('\n', _str)

    # NOTE(review): mutable default arguments, and fields_1 is mutated
    # below via extend(); safe only because callers always pass both
    # fields lists together.
    def _test_list_external_nets(self, resources, cmd,
                                 detail=False, tags=[],
                                 fields_1=[], fields_2=[]):
        """Build CLI args/query for an external-net listing and run it."""
        self.mox.StubOutWithMock(cmd, "get_client")
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
        network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        reses = {resources: [{'id': 'myid1', },
                             {'id': 'myid2', }, ], }
        resstr = self.client.serialize(reses)
        # url method body
        query = ""
        args = detail and ['-D', ] or []
        if fields_1:
            for field in fields_1:
                args.append('--fields')
                args.append(field)
        if tags:
            # '--' separates known options from passthrough filters.
            args.append('--')
            args.append("--tag")
            for tag in tags:
                args.append(tag)
        if (not tags) and fields_2:
            args.append('--')
        if fields_2:
            args.append("--fields")
            for field in fields_2:
                args.append(field)
        # Expected query string mirrors the arg construction above.
        fields_1.extend(fields_2)
        for field in fields_1:
            if query:
                query += "&fields=" + field
            else:
                query = "fields=" + field
        if query:
            query += '&router%3Aexternal=True'
        else:
            query += 'router%3Aexternal=True'
        for tag in tags:
            if query:
                query += "&tag=" + tag
            else:
                query = "tag=" + tag
        if detail:
            query = query and query + '&verbose=True' or 'verbose=True'
        path = getattr(self.client, resources + "_path")
        self.client.httpclient.request(
            test_cli20.end_url(path, query), 'GET',
            body=None,
            headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
        ).AndReturn((test_cli20.MyResp(200), resstr))
        self.mox.ReplayAll()
        cmd_parser = cmd.get_parser("list_" + resources)
        shell.run_command(cmd, cmd_parser, args)
        self.mox.VerifyAll()
        self.mox.UnsetStubs()
        _str = self.fake_stdout.make_string()
        self.assertIn('myid1', _str)

    def test_list_external_nets_detail(self):
        """list external nets: -D."""
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd, True)

    def test_list_external_nets_tags(self):
        """List external nets: -- --tags a b."""
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources,
                                      cmd, tags=['a', 'b'])

    def test_list_external_nets_detail_tags(self):
        """List external nets: -D -- --tags a b."""
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd,
                                      detail=True, tags=['a', 'b'])

    # NOTE(review): 'externel' is a typo in the test name; renaming
    # would change the reported test id, so it is left as-is.
    def test_list_externel_nets_fields(self):
        """List external nets: --fields a --fields b -- --fields c d."""
        resources = "networks"
        cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_list_external_nets(resources, cmd,
                                      fields_1=['a', 'b'],
                                      fields_2=['c', 'd'])

    def test_update_network_exception(self):
        """Update net: myid."""
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        # No fields to update => CommandError.
        self.assertRaises(exceptions.CommandError, self._test_update_resource,
                          resource, cmd, 'myid', ['myid'], {})

    def test_update_network(self):
        """Update net: myid --name myname --tags a b."""
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', 'myname',
                                    '--tags', 'a', 'b'],
                                   {'name': 'myname', 'tags': ['a', 'b'], }
                                   )

    def test_update_network_with_unicode(self):
        """Update net: myid --name u'\u7f51\u7edc' --tags a b."""
        resource = 'network'
        cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
        self._test_update_resource(resource, cmd, 'myid',
                                   ['myid', '--name', u'\u7f51\u7edc',
                                    '--tags', 'a', 'b'],
                                   {'name': u'\u7f51\u7edc',
                                    'tags': ['a', 'b'], }
                                   )

    def test_show_network(self):
        """Show net: --fields id --fields name myid."""
        resource = 'network'
        cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
        args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])

    def test_delete_network(self):
        """Delete net: myid."""
        resource = 'network'
        cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
        myid = 'myid'
        args = [myid]
        self._test_delete_resource(resource, cmd, myid, args)

    def _test_extend_list(self, mox_calls):
        """Drive extend_list over 10 fake nets with caller-recorded mocks.

        *mox_calls* is a callable(path, data) that records the HTTP
        expectations for the subnet lookups extend_list will perform.
        """
        data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
                 'subnets': ['mysubid%d' % i]}
                for i in range(10)]
        self.mox.StubOutWithMock(self.client.httpclient, "request")
        path = getattr(self.client, 'subnets_path')
        cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
        self.mox.StubOutWithMock(cmd, "get_client")
        cmd.get_client().MultipleTimes().AndReturn(self.client)
        mox_calls(path, data)
        self.mox.ReplayAll()
        known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
        cmd.extend_list(data, known_args)
        self.mox.VerifyAll()

    def _build_test_data(self, data):
        """Return (query filters, mocked subnets response) for *data*."""
        subnet_ids = []
        response = []
        filters = ""
        for n in data:
            if 'subnets' in n:
                subnet_ids.extend(n['subnets'])
                for subnet_id in n['subnets']:
                    filters = "%s&id=%s" % (filters, subnet_id)
                    response.append({'id': subnet_id,
                                     'cidr': '192.168.0.0/16'})
        resp_str = self.client.serialize({'subnets': response})
        resp = (test_cli20.MyResp(200), resp_str)
        return filters, resp

    def test_extend_list(self):
        """extend_list fetches all subnets in a single GET."""
        def mox_calls(path, data):
            filters, response = self._build_test_data(data)
            self.client.httpclient.request(
                test_cli20.end_url(path, 'fields=id&fields=cidr' + filters),
                'GET',
                body=None,
                headers=mox.ContainsKeyValue(
                    'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)

        self._test_extend_list(mox_calls)

    def test_extend_list_exceed_max_uri_len(self):
        """extend_list splits the subnet GET when the URI is too long."""
        def mox_calls(path, data):
            sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
            filters, response = self._build_test_data(data)
            # 1 char of extra URI len will cause a split in 2 requests
            self.mox.StubOutWithMock(self.client, "_check_uri_length")
            self.client._check_uri_length(mox.IgnoreArg()).AndRaise(
                exceptions.RequestURITooLong(excess=1))
            for data in sub_data_lists:
                filters, response = self._build_test_data(data)
                self.client._check_uri_length(mox.IgnoreArg()).AndReturn(None)
                self.client.httpclient.request(
                    test_cli20.end_url(path,
                                       'fields=id&fields=cidr%s' % filters),
                    'GET',
                    body=None,
                    headers=mox.ContainsKeyValue(
                        'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)

        self._test_extend_list(mox_calls)
class CLITestV20NetworkXML(CLITestV20NetworkJSON):
    # Re-run the entire JSON test suite using the XML wire format.
    format = 'xml'
| {
"repo_name": "vijayendrabvs/ssl-python-neutronclient",
"path": "neutronclient/tests/unit/test_cli20_network.py",
"copies": "1",
"size": "23050",
"license": "apache-2.0",
"hash": 3938712530012139000,
"line_mean": 42.3270676692,
"line_max": 78,
"alpha_frac": 0.5393058568,
"autogenerated": false,
"ratio": 3.7706527073450022,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9809958564145003,
"avg_score": 0,
"num_lines": 532
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.lib.common.utils import misc as misc_utils
from tempest.lib import exceptions as lib_exc
def wait_for_bm_node_status(client, node_id, attr, status):
    """Block until a baremetal node's attribute equals ``status``.

    ``client`` must expose ``show_node(node_uuid)`` plus ``build_interval``
    and ``build_timeout`` settings.  Returns as soon as the attribute
    matches; raises lib_exc.TimeoutException once ``build_timeout`` seconds
    elapse without a match.
    """
    _, node = client.show_node(node_id)
    start = int(time.time())
    # Poll until the attribute converges on the requested value.
    while node[attr] != status:
        time.sleep(client.build_interval)
        _, node = client.show_node(node_id)
        current = node[attr]
        if current == status:
            return
        if int(time.time()) - start < client.build_timeout:
            continue
        message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
                   'within the required time (%(timeout)s s).' %
                   {'node_id': node_id,
                    'attr': attr,
                    'status': status,
                    'timeout': client.build_timeout})
        message += ' Current state of %s: %s.' % (attr, current)
        caller = misc_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise lib_exc.TimeoutException(message)
| {
"repo_name": "bacaldwell/ironic",
"path": "ironic_tempest_plugin/common/waiters.py",
"copies": "2",
"size": "1847",
"license": "apache-2.0",
"hash": -8582019581826489000,
"line_mean": 37.4791666667,
"line_max": 78,
"alpha_frac": 0.608012994,
"autogenerated": false,
"ratio": 4.006507592190889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.561452058619089,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module was heavily based on https://github.com/sdague/failopotamus/
"""
import json
import urllib
import requests
import tinyurl
from utils import ArgumentParser
COLORS = (
('ff0000', 'b00000'),
('0000ff', '0000b0'),
('00ff00', '00b000'),
)
def graphite_base_url(since=200, avg=12):
    """Return the graphite /render URL prefix (time window, axes, title).

    The caller appends 'target=' expressions to this base.

    NOTE(review): urllib.quote is Python 2 only (urllib.parse.quote on
    Python 3) -- this module appears to target Python 2; confirm before
    porting.
    """
    ylabel = urllib.quote('Failure Rate in Percent')
    title = urllib.quote('Test failure rates over last %s hours '
                         '(%s hour rolling average)' % (since, avg))
    # %d consumes `since`; the two %s slots take the pre-quoted ylabel/title.
    return ('http://graphite.openstack.org/render/?from=-%dhours'
            '&height=500&until=now&width=800&bgcolor=ffffff'
            '&fgcolor=000000&yMax=100&yMin=0&vtitle=%s'
            '&title=%s&drawNullAsZero=true'
            ) % (since, ylabel, title)
def failrate(job, queue, color, width=1, avg=12):
    """Build one graphite 'target=' expression for a job's failure rate.

    Plots the `avg`-hour moving average of FAILURE as a percentage of
    SUCCESS+FAILURE for `job` in pipeline `queue`, with the given line
    color and width.

    The %%27 / %%20 sequences are deliberately double-escaped: the outer
    %-format collapses them to %27 / %20 (URL-encoded quote and space).
    """
    title = urllib.quote('%s (%s)' % (job, queue))
    return ('target=lineWidth(color('
            'alias('
            'movingAverage('
            'asPercent('
            'transformNull('
            'stats_counts.zuul.pipeline.%(queue)s.job.%(job)s.FAILURE),'
            'transformNull(sum(stats_counts.zuul.pipeline.%(queue)s.'
            'job.%(job)s.{SUCCESS,FAILURE})))'
            ',%%27%(time)shours%%27),%%20%%27%(title)s%%27),'
            '%%27%(color)s%%27),'
            '%(width)s)' %
            {'job': job, 'queue': queue, 'time': avg,
             'color': color, 'title': title, 'width': width})
def target_in_pipeline(target, pipeline):
    """Return True when graphite has any stats for `target` in `pipeline`."""
    url = ('http://graphite.openstack.org/render?target='
           'stats.zuul.pipeline.%(pipeline)s.job'
           '.%(target)s.*&format=json' %
           {'pipeline': pipeline, 'target': target})
    payload = json.loads(requests.get(url).content)
    # An empty JSON payload means the target is unknown to the server.
    return bool(payload)
def get_targets(target, colors, avg=12):
    """Build graphite target expressions for each pipeline the job runs in."""
    result = []
    found = 0
    for pipeline in ('check', 'gate'):
        if not target_in_pipeline(target, pipeline):
            continue
        # Color index starts at 0 and line width at 1; both advance
        # together for each pipeline that has data.
        result.append(
            failrate(target, pipeline, colors[found], found + 1, avg))
        found += 1
    return result
def parse_args(args):
    """Parse command arguments: one or more test names plus graph options."""
    parser = ArgumentParser()
    parser.add_argument('tests', nargs='+')
    parser.add_argument(
        '-d', '--duration', type=int, default=200,
        help='Graph over duration hours (default 200)')
    parser.add_argument(
        '-s', '--smoothing', type=int, default=12,
        help='Rolling average hours (defaults to 12)')
    return parser.parse_args(args)
def failgraph(args):
    """Return a tinyurl to a graphite graph of failure rates for the tests.

    `args` is the raw argument list (see parse_args).  Returns the literal
    string 'No data' when none of the requested tests have data in either
    the check or gate pipeline.
    """
    opts = parse_args(args)
    target_chunks = []
    for colorpairs, target in enumerate(opts.tests):
        targets = get_targets(target, COLORS[colorpairs % len(COLORS)],
                              avg=opts.smoothing)
        if targets:
            target_chunks.append('&'.join(targets))
    # Bug fix: the original checked only the *last* test's targets, so a
    # trailing unknown test made the whole request report 'No data' even
    # when earlier tests had data.  It also joined onto an initially-empty
    # string, producing a stray '&&' in the final URL.
    if not target_chunks:
        return 'No data'
    url = '&'.join([graphite_base_url(since=opts.duration,
                                      avg=opts.smoothing)] + target_chunks)
    return tinyurl.create_one(url)
| {
"repo_name": "umago/pixiebot",
"path": "pixiebot/failgraph.py",
"copies": "1",
"size": "3840",
"license": "apache-2.0",
"hash": -1769610849889258000,
"line_mean": 32.6842105263,
"line_max": 78,
"alpha_frac": 0.5927083333,
"autogenerated": false,
"ratio": 3.7317784256559765,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9824486758955977,
"avg_score": 0,
"num_lines": 114
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Valid trunk statuses
# The trunk is happy, yay!
# A trunk remains in ACTIVE state when updates like name or admin_status_up
# occur. It goes back to ACTIVE state from other states (e.g. BUILD) when
# logical and physical resource provisioning has completed successfully. The
# attribute ADMIN_STATE_UP is not to be confused with STATUS: the former
# indicates whether a trunk can be managed. If a trunk has admin_state_up
# equal to false, the trunk plugin will reject any user request to manage
# the trunk resources (i.e. adding/removing sub-ports). ACTIVE_STATUS
# reflects the provisioning state of logical and physical resources associated
# with the trunk.
TRUNK_ACTIVE_STATUS = 'ACTIVE'
# A trunk is in DOWN state any time the logical and physical resources
# associated to a trunk are not in sync. This can happen in the following
# cases:
# a) A user has asked to create a trunk, or add(remove) subports to a
# trunk in ACTIVE state. In this case, the plugin has created/updated the
# logical resource, and the request has been passed along to a backend. The
# physical resources associated to the trunk are in the process of being
# (de)commissioned. While this happens, the logical and physical state are
# mismatching, albeit temporarily during subport operations, or until a user
# spawns a VM after a trunk creation.
# b) A system event, such as instance deletion, has led to the deprovisioning
# of the entire set of physical resources associated to the trunk. In this
# case, the logical resource exists but it has no physical resources
# associated with it, and the logical and physical state of the trunk are
# not matching.
TRUNK_DOWN_STATUS = 'DOWN'
# A driver/backend has acknowledged the server request: once the server
# notifies the driver/backend, a trunk is in BUILD state while the
# backend provisions the trunk resources.
TRUNK_BUILD_STATUS = 'BUILD'
# Should any temporary system failure occur during the provisioning process,
# a trunk is in DEGRADED state. This means that the trunk was only
# partially provisioned, and only a subset of the subports were added
# successfully to the trunk. The operation of removing/adding the faulty
# subports may be attempted as a recovery measure.
TRUNK_DEGRADED_STATUS = 'DEGRADED'
# Due to unforeseen circumstances, the user request has led to a conflict, and
# the trunk cannot be provisioned correctly for a subset of subports. For
# instance, a subport belonging to a network might not be compatible with
# the current trunk configuration, or the binding process leads to a persistent
# failure. Removing the 'offending' resource may be attempted as a recovery
# measure, but readding it to the trunk should lead to the same error
# condition. A trunk in ERROR status should be brought back to a sane status
# (i.e. any state except ERROR state) before attempting to add more subports,
# therefore requests of adding more subports must be rejected to avoid
# cascading errors.
TRUNK_ERROR_STATUS = 'ERROR'
# String literals for identifying trunk resources
# also see SUBPORTS, TRUNK and TRUNK_PLUGIN in neutron_lib.callbacks.resources
TRUNK_PARENT_PORT = 'parent_port'
# NOTE(review): presumably the device_owner value applied to ports used as
# trunk subports -- verify against consumers.
TRUNK_SUBPORT_OWNER = 'trunk:subport'
# String literals for segmentation types
SEGMENTATION_TYPE_VLAN = 'vlan'
# NOTE(review): 'inherit' presumably means the subport's segmentation is
# derived rather than given explicitly -- confirm semantics with the
# trunk drivers before relying on this comment.
SEGMENTATION_TYPE_INHERIT = 'inherit'
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/services/trunk/constants.py",
"copies": "1",
"size": "3933",
"license": "apache-2.0",
"hash": 4317747906461339000,
"line_mean": 48.1625,
"line_max": 79,
"alpha_frac": 0.7658276125,
"autogenerated": false,
"ratio": 4.005091649694501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5270919262194501,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import sys
import mox
from quantumclient.common import exceptions
from quantumclient.common import utils
from quantumclient.quantum.v2_0 import network
from quantumclient import shell
from tests.unit import test_cli20
class CLITestV20NetworkJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20NetworkJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_network(self):
"""Create net: myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_with_unicode(self):
"""Create net: u'\u7f51\u7edc'."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = u'\u7f51\u7edc'
myid = 'myid'
args = [name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_network_tenant(self):
"""Create net: --tenant_id tenantid myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--tenant_id', 'tenantid', name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', name]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_network_tags(self):
"""Create net: myname --tags a b."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = [name, '--tags', 'a', 'b']
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_network_state(self):
"""Create net: --admin_state_down myname."""
resource = 'network'
cmd = network.CreateNetwork(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
args = ['--admin_state_down', name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
# Test dashed options
args = ['--admin-state-down', name, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
admin_state_up=False)
def test_list_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.end_url(path, query), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEquals('\n', _str)
def _test_list_networks(self, cmd, detail=False, tags=[],
fields_1=[], fields_2=[], page_size=None,
sort_key=[], sort_dir=[]):
resources = "networks"
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources(resources, cmd, detail, tags,
fields_1, fields_2, page_size=page_size,
sort_key=sort_key, sort_dir=sort_dir)
def test_list_nets_pagination(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_resources_with_pagination("networks", cmd)
def test_list_nets_sort(self):
"""list nets: --sort-key name --sort-key id --sort-dir asc
--sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['asc', 'desc'])
def test_list_nets_sort_with_keys_more_than_dirs(self):
"""list nets: --sort-key name --sort-key id --sort-dir desc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name', 'id'],
sort_dir=['desc'])
def test_list_nets_sort_with_dirs_more_than_keys(self):
"""list nets: --sort-key name --sort-dir desc --sort-dir asc
"""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, sort_key=['name'],
sort_dir=['desc', 'asc'])
def test_list_nets_limit(self):
"""list nets: -P."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, page_size=1000)
def test_list_nets_detail(self):
"""list nets: -D."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, True)
def test_list_nets_tags(self):
"""List nets: -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=['a', 'b'])
def test_list_nets_tags_with_unicode(self):
"""List nets: -- --tags u'\u7f51\u7edc'."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, tags=[u'\u7f51\u7edc'])
def test_list_nets_detail_tags(self):
"""List nets: -D -- --tags a b."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd, detail=True, tags=['a', 'b'])
def _test_list_nets_extend_subnets(self, data, expected):
def setup_list_stub(resources, data, query):
reses = {resources: data}
resstr = self.client.serialize(reses)
resp = (test_cli20.MyResp(200), resstr)
path = getattr(self.client, resources + '_path')
self.client.httpclient.request(
test_cli20.end_url(path, query), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(resp)
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, 'get_client')
self.mox.StubOutWithMock(self.client.httpclient, 'request')
cmd.get_client().AndReturn(self.client)
setup_list_stub('networks', data, '')
cmd.get_client().AndReturn(self.client)
filters = ''
for n in data:
for s in n['subnets']:
filters = filters + "&id=%s" % s
setup_list_stub('subnets',
[{'id': 'mysubid1', 'cidr': '192.168.1.0/24'},
{'id': 'mysubid2', 'cidr': '172.16.0.0/24'},
{'id': 'mysubid3', 'cidr': '10.1.1.0/24'}],
query='fields=id&fields=cidr' + filters)
self.mox.ReplayAll()
args = []
cmd_parser = cmd.get_parser('list_networks')
parsed_args = cmd_parser.parse_args(args)
result = cmd.get_data(parsed_args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_result = [x for x in result[1]]
self.assertEqual(len(_result), len(expected))
for res, exp in zip(_result, expected):
self.assertEqual(len(res), len(exp))
for a, b in zip(res, exp):
self.assertEqual(a, b)
def test_list_nets_extend_subnets(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid2',
'mysubid3']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2',
'mysubid2 172.16.0.0/24\nmysubid3 10.1.1.0/24')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_extend_subnets_no_subnet(self):
data = [{'id': 'netid1', 'name': 'net1', 'subnets': ['mysubid1']},
{'id': 'netid2', 'name': 'net2', 'subnets': ['mysubid4']}]
# id, name, subnets
expected = [('netid1', 'net1', 'mysubid1 192.168.1.0/24'),
('netid2', 'net2', 'mysubid4 ')]
self._test_list_nets_extend_subnets(data, expected)
def test_list_nets_fields(self):
"""List nets: --fields a --fields b -- --fields c d."""
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_networks(cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_nets_columns(self, cmd, returned_body,
args=['-f', 'json']):
resources = 'networks'
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
self._test_list_columns(cmd, resources, returned_body, args=args)
def test_list_nets_defined_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body,
args=['-f', 'json', '-c', 'id'])
_str = self.fake_stdout.make_string()
returned_networks = utils.loads(_str)
self.assertEquals(1, len(returned_networks))
net = returned_networks[0]
self.assertEquals(1, len(net))
self.assertEquals("id", net.keys()[0])
def test_list_nets_with_default_column(self):
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
returned_body = {"networks": [{"name": "buildname3",
"id": "id3",
"tenant_id": "tenant_3",
"subnets": []}]}
self._test_list_nets_columns(cmd, returned_body)
_str = self.fake_stdout.make_string()
returned_networks = utils.loads(_str)
self.assertEquals(1, len(returned_networks))
net = returned_networks[0]
self.assertEquals(3, len(net))
self.assertEquals(0, len(set(net) ^ set(cmd.list_columns)))
def test_list_external_nets_empty_with_column(self):
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: []}
resstr = self.client.serialize(reses)
# url method body
query = "router%3Aexternal=True&id=myfakeid"
args = ['-c', 'id', '--', '--id', 'myfakeid']
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.end_url(path, query), 'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn(
(test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertEquals('\n', _str)
def _test_list_external_nets(self, resources, cmd,
detail=False, tags=[],
fields_1=[], fields_2=[]):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
self.mox.StubOutWithMock(network.ListNetwork, "extend_list")
network.ListNetwork.extend_list(mox.IsA(list), mox.IgnoreArg())
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
fields_1.extend(fields_2)
for field in fields_1:
if query:
query += "&fields=" + field
else:
query = "fields=" + field
if query:
query += '&router%3Aexternal=True'
else:
query += 'router%3Aexternal=True'
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.end_url(path, query), 'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertTrue('myid1' in _str)
def test_list_external_nets_detail(self):
"""list external nets: -D."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd, True)
def test_list_external_nets_tags(self):
"""List external nets: -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources,
cmd, tags=['a', 'b'])
def test_list_external_nets_detail_tags(self):
"""List external nets: -D -- --tags a b."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
detail=True, tags=['a', 'b'])
def test_list_externel_nets_fields(self):
"""List external nets: --fields a --fields b -- --fields c d."""
resources = "networks"
cmd = network.ListExternalNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_list_external_nets(resources, cmd,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_network_exception(self):
"""Update net: myid."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
def test_update_network(self):
"""Update net: myid --name myname --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_network_with_unicode(self):
"""Update net: myid --name u'\u7f51\u7edc' --tags a b."""
resource = 'network'
cmd = network.UpdateNetwork(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', u'\u7f51\u7edc',
'--tags', 'a', 'b'],
{'name': u'\u7f51\u7edc',
'tags': ['a', 'b'], }
)
def test_show_network(self):
"""Show net: --fields id --fields name myid."""
resource = 'network'
cmd = network.ShowNetwork(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
['id', 'name'])
def test_delete_network(self):
"""Delete net: myid."""
resource = 'network'
cmd = network.DeleteNetwork(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def _test_extend_list(self, mox_calls):
data = [{'id': 'netid%d' % i, 'name': 'net%d' % i,
'subnets': ['mysubid%d' % i]}
for i in range(0, 10)]
self.mox.StubOutWithMock(self.client.httpclient, "request")
path = getattr(self.client, 'subnets_path')
cmd = network.ListNetwork(test_cli20.MyApp(sys.stdout), None)
self.mox.StubOutWithMock(cmd, "get_client")
cmd.get_client().MultipleTimes().AndReturn(self.client)
mox_calls(path, data)
self.mox.ReplayAll()
known_args, _vs = cmd.get_parser('create_subnets').parse_known_args()
cmd.extend_list(data, known_args)
self.mox.VerifyAll()
def _build_test_data(self, data):
subnet_ids = []
response = []
filters = ""
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets'])
for subnet_id in n['subnets']:
filters = "%s&id=%s" % (filters, subnet_id)
response.append({'id': subnet_id,
'cidr': '192.168.0.0/16'})
resp_str = self.client.serialize({'subnets': response})
resp = (test_cli20.MyResp(200), resp_str)
return filters, resp
def test_extend_list(self):
def mox_calls(path, data):
filters, response = self._build_test_data(data)
self.client.httpclient.request(
test_cli20.end_url(path, 'fields=id&fields=cidr' + filters),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
def test_extend_list_exceed_max_uri_len(self):
def mox_calls(path, data):
sub_data_lists = [data[:len(data) - 1], data[len(data) - 1:]]
filters, response = self._build_test_data(data)
# 1 char of extra URI len will cause a split in 2 requests
self.mox.StubOutWithMock(self.client, "_check_uri_length")
self.client._check_uri_length(mox.IgnoreArg()).AndRaise(
exceptions.RequestURITooLong(excess=1))
for data in sub_data_lists:
filters, response = self._build_test_data(data)
self.client._check_uri_length(mox.IgnoreArg()).AndReturn(None)
self.client.httpclient.request(
test_cli20.end_url(path,
'fields=id&fields=cidr%s' % filters),
'GET',
body=None,
headers=mox.ContainsKeyValue(
'X-Auth-Token', test_cli20.TOKEN)).AndReturn(response)
self._test_extend_list(mox_calls)
class CLITestV20NetworkXML(CLITestV20NetworkJSON):
format = 'xml'
| {
"repo_name": "CiscoSystems/python-quantumclient",
"path": "tests/unit/test_cli20_network.py",
"copies": "1",
"size": "23095",
"license": "apache-2.0",
"hash": 2230391426569028600,
"line_mean": 42.330206379,
"line_max": 78,
"alpha_frac": 0.5397272137,
"autogenerated": false,
"ratio": 3.767536704730832,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4807263918430832,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Well-known service type constants:
FIREWALL = "FIREWALL"
VPN = "VPN"
METERING = "METERING"
FLAVORS = "FLAVORS"
QOS = "QOS"
CORE = 'CORE'
L3 = 'L3_ROUTER_NAT'
LOG_API = "LOGGING"
PORTFORWARDING = "PORTFORWARDING"
FLOATINGIPPOOL = "FLOATINGIPPOOL"
NETWORK_SEGMENT_RANGE = "NETWORK_SEGMENT_RANGE"
CONNTRACKHELPER = "CONNTRACKHELPER"
# NOTE(review): the lower-case value below breaks the upper-case pattern of
# the other constants; do not "normalize" it without checking consumers.
PLACEMENT_REPORT = "placement_report"
# TODO(johnsom) Remove after these stop being used. Neutron-LBaaS is now
# retired (train) and these should no longer be necessary.
LOADBALANCER = "LOADBALANCER"
LOADBALANCERV2 = "LOADBALANCERV2"
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/plugins/constants.py",
"copies": "1",
"size": "1179",
"license": "apache-2.0",
"hash": 8570611612400072000,
"line_mean": 34.7272727273,
"line_max": 78,
"alpha_frac": 0.7311280746,
"autogenerated": false,
"ratio": 3.2659279778393353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9497056052439334,
"avg_score": 0,
"num_lines": 33
} |
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import fixtures
from neutron_lib import constants as n_consts
from oslo_utils import uuidutils
from neutron.agent import firewall
from neutron.agent.linux import ip_lib
from neutron.common import constants
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
# NOTE: IPv6 uses NDP for obtaining destination endpoints link address that
# extends round-trip packet time in ICMP tests. The timeout value should be
# sufficient for correct scenarios but not too high because of negative
# tests.
ICMP_VERSION_TIMEOUTS = {
n_consts.IP_VERSION_4: 1,
n_consts.IP_VERSION_6: 2,
}
class ConnectionTesterException(Exception):
    """Raised on invalid tester usage, e.g. an unknown traffic direction."""
    pass
def _validate_direction(f):
    """Decorator: reject an unknown `direction` before calling the method.

    The wrapped method must take `direction` as its first positional
    argument after self; anything other than the firewall ingress/egress
    constants raises ConnectionTesterException.
    """
    @functools.wraps(f)
    def wrapper(self, direction, *args, **kwargs):
        valid = (firewall.INGRESS_DIRECTION, firewall.EGRESS_DIRECTION)
        if direction in valid:
            return f(self, direction, *args, **kwargs)
        raise ConnectionTesterException('Unknown direction %s' % direction)
    return wrapper
class ConnectionTester(fixtures.Fixture):
    """Base class for connectivity testers.

    This class implements API for various methods for testing connectivity.
    The concrete implementation relies on how encapsulated resources are
    configured. That means child classes should define resources by
    themselves (e.g. endpoints connected through linux bridge or ovs bridge).
    """

    UDP = net_helpers.NetcatTester.UDP
    TCP = net_helpers.NetcatTester.TCP
    ICMP = constants.PROTO_NAME_ICMP
    ARP = constants.ETHERTYPE_NAME_ARP
    INGRESS = firewall.INGRESS_DIRECTION
    EGRESS = firewall.EGRESS_DIRECTION

    def __init__(self, ip_cidr):
        self.ip_cidr = ip_cidr

    def _setUp(self):
        self._protocol_to_method = {
            self.UDP: self._test_transport_connectivity,
            self.TCP: self._test_transport_connectivity,
            self.ICMP: self._test_icmp_connectivity,
            self.ARP: self._test_arp_connectivity}
        self._nc_testers = {}
        self._pingers = {}
        self.addCleanup(self.cleanup)

    def cleanup(self):
        """Stop all netcat processes and pingers started by this tester."""
        for nc in self._nc_testers.values():
            nc.stop_processes()
        for pinger in self._pingers.values():
            pinger.stop()

    @property
    def vm_namespace(self):
        return self._vm.namespace

    @property
    def vm_ip_address(self):
        return self._vm.ip

    @property
    def vm_ip_cidr(self):
        return self._vm.ip_cidr

    @vm_ip_cidr.setter
    def vm_ip_cidr(self, ip_cidr):
        self._vm.ip_cidr = ip_cidr

    @property
    def vm_mac_address(self):
        return self._vm.port.link.address

    @vm_mac_address.setter
    def vm_mac_address(self, mac_address):
        self._vm.mac_address = mac_address

    @property
    def peer_mac_address(self):
        return self._peer.port.link.address

    @peer_mac_address.setter
    def peer_mac_address(self, mac_address):
        self._peer.mac_address = mac_address

    @property
    def peer_namespace(self):
        return self._peer.namespace

    @property
    def peer_ip_address(self):
        return self._peer.ip

    def set_vm_default_gateway(self, default_gw):
        self._vm.set_default_gateway(default_gw)

    def flush_arp_tables(self):
        """Flush arptables in all used namespaces"""
        for machine in (self._peer, self._vm):
            machine.port.neigh.flush(4, 'all')

    def _test_transport_connectivity(self, direction, protocol, src_port,
                                     dst_port):
        nc_tester = self._create_nc_tester(direction, protocol, src_port,
                                           dst_port)
        try:
            nc_tester.test_connectivity()
        except RuntimeError as exc:
            nc_tester.stop_processes()
            raise ConnectionTesterException(
                "%s connection over %s protocol with %s source port and "
                "%s destination port can't be established: %s" % (
                    direction, protocol, src_port, dst_port, exc))

    @_validate_direction
    def _get_namespace_and_address(self, direction):
        """Return (source namespace, destination address) for direction."""
        if direction == self.INGRESS:
            return self.peer_namespace, self.vm_ip_address
        return self.vm_namespace, self.peer_ip_address

    def _test_icmp_connectivity(self, direction, protocol, src_port, dst_port):
        src_namespace, ip_address = self._get_namespace_and_address(direction)
        # ICMPv6 needs extra time for NDP resolution, hence the per-version
        # timeout table.
        ip_version = ip_lib.get_ip_version(ip_address)
        icmp_timeout = ICMP_VERSION_TIMEOUTS[ip_version]
        try:
            net_helpers.assert_ping(src_namespace, ip_address,
                                    timeout=icmp_timeout)
        except RuntimeError:
            raise ConnectionTesterException(
                "ICMP packets can't get from %s namespace to %s address" % (
                    src_namespace, ip_address))

    def _test_arp_connectivity(self, direction, protocol, src_port, dst_port):
        src_namespace, ip_address = self._get_namespace_and_address(direction)
        try:
            net_helpers.assert_arping(src_namespace, ip_address)
        except RuntimeError:
            raise ConnectionTesterException(
                "ARP queries to %s address have no response from %s namespace"
                % (ip_address, src_namespace))

    @_validate_direction
    def assert_connection(self, direction, protocol, src_port=None,
                          dst_port=None):
        """Assert that a connection can be established, else raise."""
        testing_method = self._protocol_to_method[protocol]
        testing_method(direction, protocol, src_port, dst_port)

    @_validate_direction
    def assert_no_connection(self, direction, protocol, src_port=None,
                             dst_port=None):
        """Assert that no connection can be established, else raise.

        Fix: direction is now validated up front; previously an invalid
        direction made this method silently pass because the validation
        error raised inside assert_connection was swallowed below.
        """
        try:
            self.assert_connection(direction, protocol, src_port, dst_port)
        except ConnectionTesterException:
            pass
        else:
            # Only mention ports the caller actually supplied.  Fix: the
            # previous code interpolated these ready-made fragments into a
            # "source port %s, destination port %s" template, producing a
            # garbled error message.
            src_port_info = ''
            dst_port_info = ''
            if src_port is not None:
                src_port_info = " and source port %d" % src_port
            if dst_port is not None:
                dst_port_info = " and destination port %d" % dst_port
            raise ConnectionTesterException(
                "%s connection with protocol %s%s%s was established but it "
                "shouldn't be possible" % (direction, protocol,
                                           src_port_info, dst_port_info))

    @_validate_direction
    def assert_established_connection(self, direction, protocol, src_port=None,
                                      dst_port=None):
        """Assert a previously established connection still communicates."""
        nc_params = (direction, protocol, src_port, dst_port)
        nc_tester = self._nc_testers.get(nc_params)
        if nc_tester:
            if nc_tester.is_established:
                try:
                    nc_tester.test_connectivity()
                except RuntimeError:
                    # Fix: the parameters were never interpolated into this
                    # message ("% nc_params" was missing).
                    raise ConnectionTesterException(
                        "Established %s connection with protocol %s, source "
                        "port %s and destination port %s can no longer "
                        "communicate" % nc_params)
            else:
                nc_tester.stop_processes()
                raise ConnectionTesterException(
                    '%s connection with protocol %s, source port %s and '
                    'destination port %s is not established' % nc_params)
        else:
            raise ConnectionTesterException(
                "Attempting to test established %s connection with protocol %s"
                ", source port %s and destination port %s that hasn't been "
                "established yet by calling establish_connection()"
                % nc_params)

    @_validate_direction
    def assert_no_established_connection(self, direction, protocol,
                                         src_port=None, dst_port=None):
        """Assert a previously established connection no longer works.

        Fix: direction validated up front for the same reason as in
        assert_no_connection.
        """
        try:
            self.assert_established_connection(direction, protocol, src_port,
                                               dst_port)
        except ConnectionTesterException:
            pass
        else:
            raise ConnectionTesterException(
                'Established %s connection with protocol %s, source port %s, '
                'destination port %s can still send packets through' % (
                    direction, protocol, src_port, dst_port))

    @_validate_direction
    def establish_connection(self, direction, protocol, src_port=None,
                             dst_port=None):
        nc_tester = self._create_nc_tester(direction, protocol, src_port,
                                           dst_port)
        nc_tester.establish_connection()
        self.addCleanup(nc_tester.stop_processes)

    def _create_nc_tester(self, direction, protocol, src_port, dst_port):
        """Create netcat tester

        If there already exists a netcat tester that has established
        connection, exception is raised.
        """
        nc_key = (direction, protocol, src_port, dst_port)
        nc_tester = self._nc_testers.get(nc_key)
        if nc_tester and nc_tester.is_established:
            raise ConnectionTesterException(
                '%s connection using %s protocol, source port %s and '
                'destination port %s is already established' % (
                    direction, protocol, src_port, dst_port))

        if direction == self.INGRESS:
            client_ns = self.peer_namespace
            server_ns = self.vm_namespace
            server_addr = self.vm_ip_address
        else:
            client_ns = self.vm_namespace
            server_ns = self.peer_namespace
            server_addr = self.peer_ip_address

        server_port = dst_port or net_helpers.get_free_namespace_port(
            protocol, server_ns)
        nc_tester = net_helpers.NetcatTester(client_namespace=client_ns,
                                             server_namespace=server_ns,
                                             address=server_addr,
                                             protocol=protocol,
                                             src_port=src_port,
                                             dst_port=server_port)
        self._nc_testers[nc_key] = nc_tester
        return nc_tester

    def _get_pinger(self, direction):
        # Lazily create and cache one pinger per direction.
        try:
            pinger = self._pingers[direction]
        except KeyError:
            src_namespace, dst_address = self._get_namespace_and_address(
                direction)
            pinger = net_helpers.Pinger(src_namespace, dst_address)
            self._pingers[direction] = pinger
        return pinger

    def start_sending_icmp(self, direction):
        pinger = self._get_pinger(direction)
        pinger.start()

    def stop_sending_icmp(self, direction):
        pinger = self._get_pinger(direction)
        pinger.stop()

    def get_sent_icmp_packets(self, direction):
        pinger = self._get_pinger(direction)
        return pinger.sent

    def get_received_icmp_packets(self, direction):
        pinger = self._get_pinger(direction)
        return pinger.received

    def assert_net_unreachable(self, direction, destination):
        """Assert pings to destination yield 'Destination Unreachable'."""
        src_namespace, dst_address = self._get_namespace_and_address(
            direction)
        pinger = net_helpers.Pinger(src_namespace, destination, count=5)
        pinger.start()
        pinger.wait()
        if not pinger.destination_unreachable:
            raise ConnectionTesterException(
                'No Host Destination Unreachable packets were received when '
                'sending icmp packets to %s' % destination)
class OVSConnectionTester(ConnectionTester):
    """Tester whose endpoints are OVS internal ports on one OVS bridge.

    NOTE: The OVS ports are connected from the namespace. This connection is
    currently not supported in OVS and may lead to unpredicted behavior:
    https://bugzilla.redhat.com/show_bug.cgi?id=1160340
    """

    def _setUp(self):
        super(OVSConnectionTester, self)._setUp()
        self.bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        machines = self.useFixture(
            machine_fixtures.PeerMachines(
                self.bridge, self.ip_cidr)).machines
        self._peer, self._vm = machines
        for machine in machines:
            self._set_port_attrs(machine.port)

    def _set_port_attrs(self, port):
        """Give the port a fresh id and mirror it into its Interface row."""
        port.id = uuidutils.generate_uuid()
        external_ids = {'iface-id': port.id,
                        'iface-status': 'active',
                        'attached-mac': port.link.address}
        for column, value in (('type', 'internal'),
                              ('external_ids', external_ids)):
            self.bridge.set_db_attribute('Interface', port.name, column, value)

    @property
    def peer_port_id(self):
        return self._peer.port.id

    @property
    def vm_port_id(self):
        return self._vm.port.id

    def set_tag(self, port_name, tag):
        """Assign a VLAN tag to the given port on the bridge."""
        self.bridge.set_db_attribute('Port', port_name, 'tag', tag)

    def set_vm_tag(self, tag):
        self.set_tag(self._vm.port.name, tag)

    def set_peer_tag(self, tag):
        self.set_tag(self._peer.port.name, tag)
class LinuxBridgeConnectionTester(ConnectionTester):
    """Tester with a linux bridge in the middle.

    Both endpoints live in their own namespaces, each wired to the bridge's
    namespace through a veth pair.
    """

    def _setUp(self):
        super(LinuxBridgeConnectionTester, self)._setUp()
        bridge_fixture = self.useFixture(net_helpers.LinuxBridgeFixture())
        self.bridge = bridge_fixture.bridge
        peer_machines = self.useFixture(
            machine_fixtures.PeerMachines(
                self.bridge, self.ip_cidr)).machines
        self._peer, self._vm = peer_machines

    @property
    def bridge_namespace(self):
        return self.bridge.namespace

    @property
    def vm_port_id(self):
        return net_helpers.VethFixture.get_peer_name(self._vm.port.name)

    @property
    def peer_port_id(self):
        return net_helpers.VethFixture.get_peer_name(self._peer.port.name)

    def flush_arp_tables(self):
        """Flush the bridge's own ARP table, then each endpoint's."""
        self.bridge.neigh.flush(4, 'all')
        super(LinuxBridgeConnectionTester, self).flush_arp_tables()
| {
"repo_name": "wolverineav/neutron",
"path": "neutron/tests/common/conn_testers.py",
"copies": "3",
"size": "14917",
"license": "apache-2.0",
"hash": -3876978145615188500,
"line_mean": 36.5743073048,
"line_max": 79,
"alpha_frac": 0.5981095395,
"autogenerated": false,
"ratio": 4.24622829490464,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 397
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.utils import net
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from neutron.services.trunk.drivers.openvswitch.agent import trunk_manager
from neutron.services.trunk.drivers.openvswitch import utils
from neutron.tests.common import conn_testers
from neutron.tests.common import helpers
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
from neutron.tests.functional import constants as test_constants
LOG = logging.getLogger(__name__)
# Pool of candidate VLAN tags for these tests.
# NOTE(review): range(1, VLAN_COUNT - 1) excludes the top two tags
# (VLAN_COUNT - 2 and VLAN_COUNT - 1) — confirm the "- 1" is intentional.
VLAN_RANGE = set(range(1, test_constants.VLAN_COUNT - 1))
class FakeOVSDBException(Exception):
    """Stand-in OVSDB failure injected via mock side_effect in these tests."""
class TrunkParentPortTestCase(base.BaseSudoTestCase):
    """Exercises TrunkParentPort.plug()/unplug() against real OVS bridges."""

    def setUp(self):
        super(TrunkParentPortTestCase, self).setUp()
        trunk_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        self.trunk = trunk_manager.TrunkParentPort(trunk_id, port_id, port_mac)
        # Replace the trunk's auto-created bridge with a fixture-managed
        # bridge of the same name so it is torn down automatically.
        self.trunk.bridge = self.useFixture(
            net_helpers.OVSTrunkBridgeFixture(
                self.trunk.bridge.br_name)).bridge
        self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge

    def test_plug(self):
        """Plugging creates one patch port on each bridge."""
        self.trunk.plug(self.br_int)
        self.assertIn(self.trunk.patch_port_trunk_name,
                      self.trunk.bridge.get_port_name_list())
        self.assertIn(self.trunk.patch_port_int_name,
                      self.br_int.get_port_name_list())

    def test_plug_failure_doesnt_create_ports(self):
        """An OVSDB error during plug must not leave patch ports behind."""
        with mock.patch.object(
                self.trunk.bridge.ovsdb, 'db_set',
                side_effect=FakeOVSDBException):
            with testtools.ExpectedException(FakeOVSDBException):
                self.trunk.plug(self.br_int)
        self.assertNotIn(self.trunk.patch_port_trunk_name,
                         self.trunk.bridge.get_port_name_list())
        self.assertNotIn(self.trunk.patch_port_int_name,
                         self.br_int.get_port_name_list())

    def test_unplug(self):
        """Unplugging removes the trunk bridge and its int-side patch port."""
        self.trunk.plug(self.br_int)
        self.trunk.unplug(self.br_int)
        self.assertFalse(
            self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name))
        self.assertNotIn(self.trunk.patch_port_int_name,
                         self.br_int.get_port_name_list())

    def test_unplug_failure_doesnt_delete_bridge(self):
        """An OVSDB error during unplug must leave the trunk intact."""
        self.trunk.plug(self.br_int)
        with mock.patch.object(
                self.trunk.bridge.ovsdb, 'del_port',
                side_effect=FakeOVSDBException):
            with testtools.ExpectedException(FakeOVSDBException):
                self.trunk.unplug(self.br_int)
        self.assertTrue(
            self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name))
        self.assertIn(self.trunk.patch_port_trunk_name,
                      self.trunk.bridge.get_port_name_list())
        self.assertIn(self.trunk.patch_port_int_name,
                      self.br_int.get_port_name_list())
class SubPortTestCase(base.BaseSudoTestCase):
    """Exercises SubPort.plug()/unplug() against real OVS bridges."""

    def setUp(self):
        super(SubPortTestCase, self).setUp()
        trunk_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        trunk_bridge_name = utils.gen_trunk_br_name(trunk_id)
        trunk_bridge = self.useFixture(
            net_helpers.OVSTrunkBridgeFixture(trunk_bridge_name)).bridge
        # Pick a VLAN tag not already in use on the trunk bridge.
        segmentation_id = helpers.get_not_used_vlan(
            trunk_bridge, VLAN_RANGE)
        self.subport = trunk_manager.SubPort(
            trunk_id, port_id, port_mac, segmentation_id)
        self.subport.bridge = trunk_bridge
        self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge

    def test_plug(self):
        """Plugging creates the patch ports and tags the trunk-side port."""
        self.subport.plug(self.br_int)
        self.assertIn(self.subport.patch_port_trunk_name,
                      self.subport.bridge.get_port_name_list())
        self.assertIn(self.subport.patch_port_int_name,
                      self.br_int.get_port_name_list())
        self.assertEqual(
            self.subport.segmentation_id,
            self.subport.bridge.db_get_val(
                'Port', self.subport.patch_port_trunk_name, 'tag'))

    def test_plug_failure_doesnt_create_ports(self):
        """An OVSDB error during plug must not leave patch ports behind."""
        with mock.patch.object(
                self.subport.bridge.ovsdb, 'db_set',
                side_effect=FakeOVSDBException):
            with testtools.ExpectedException(FakeOVSDBException):
                self.subport.plug(self.br_int)
        self.assertNotIn(self.subport.patch_port_trunk_name,
                         self.subport.bridge.get_port_name_list())
        self.assertNotIn(self.subport.patch_port_int_name,
                         self.br_int.get_port_name_list())

    def test_unplug(self):
        """Unplugging removes the patch ports on both bridges."""
        self.subport.plug(self.br_int)
        self.subport.unplug(self.br_int)
        self.assertNotIn(self.subport.patch_port_trunk_name,
                         self.subport.bridge.get_port_name_list())
        self.assertNotIn(self.subport.patch_port_int_name,
                         self.br_int.get_port_name_list())

    def test_unplug_failure(self):
        """An OVSDB error during unplug must leave the patch ports in place."""
        self.subport.plug(self.br_int)
        with mock.patch.object(
                self.subport.bridge.ovsdb, 'del_port',
                side_effect=FakeOVSDBException):
            with testtools.ExpectedException(FakeOVSDBException):
                self.subport.unplug(self.br_int)
        self.assertIn(self.subport.patch_port_trunk_name,
                      self.subport.bridge.get_port_name_list())
        self.assertIn(self.subport.patch_port_int_name,
                      self.br_int.get_port_name_list())
class TrunkManagerTestCase(base.BaseSudoTestCase):
    """End-to-end trunk connectivity test through the TrunkManager."""

    # NOTE(review): 192.178.0.0/24 is not an RFC 1918 private range (likely
    # a typo for 192.168) — confirm before reusing elsewhere.
    net1_cidr = '192.178.0.1/24'
    net2_cidr = '192.168.0.1/24'

    def setUp(self):
        super(TrunkManagerTestCase, self).setUp()
        trunk_id = uuidutils.generate_uuid()
        # The tester builds the trunk bridge plus a vm and a peer on net1.
        self.tester = self.useFixture(
            conn_testers.OVSTrunkConnectionTester(
                self.net1_cidr, utils.gen_trunk_br_name(trunk_id)))
        self.trunk_manager = trunk_manager.TrunkManager(
            self.tester.bridge)
        self.trunk = trunk_manager.TrunkParentPort(
            trunk_id, uuidutils.generate_uuid())

    def test_connectivity(self):
        """Test connectivity with trunk and sub ports.

        In this test we create a vm that has a trunk on net1 and a vm peer on
        the same network. We check connectivity between the peer and the vm.
        We create a sub port on net2 and a peer, check connectivity again.
        """
        vlan_net1 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)
        vlan_net2 = helpers.get_not_used_vlan(self.tester.bridge, VLAN_RANGE)
        trunk_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        sub_port_mac = net.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        sub_port_segmentation_id = helpers.get_not_used_vlan(
            self.tester.bridge, VLAN_RANGE)
        LOG.debug("Using %(n1)d vlan tag as local vlan ID for net1 and %(n2)d "
                  "for local vlan ID for net2", {
                      'n1': vlan_net1, 'n2': vlan_net2})
        self.tester.set_peer_tag(vlan_net1)
        self.trunk_manager.create_trunk(self.trunk.trunk_id,
                                        self.trunk.port_id,
                                        trunk_mac)
        # tag the patch port, this should be done by the ovs agent but we mock
        # it for this test
        conn_testers.OVSBaseConnectionTester.set_tag(
            self.trunk.patch_port_int_name, self.tester.bridge, vlan_net1)
        # Parent-port traffic must flow both ways.
        self.tester.wait_for_connection(self.tester.INGRESS)
        self.tester.wait_for_connection(self.tester.EGRESS)
        self.tester.add_vlan_interface_and_peer(sub_port_segmentation_id,
                                                self.net2_cidr)
        conn_testers.OVSBaseConnectionTester.set_tag(
            self.tester._peer2.port.name, self.tester.bridge, vlan_net2)
        sub_port = trunk_manager.SubPort(self.trunk.trunk_id,
                                         uuidutils.generate_uuid(),
                                         sub_port_mac,
                                         sub_port_segmentation_id)
        self.trunk_manager.add_sub_port(sub_port.trunk_id,
                                        sub_port.port_id,
                                        sub_port.port_mac,
                                        sub_port.segmentation_id)
        # tag the patch port, this should be done by the ovs agent but we mock
        # it for this test
        conn_testers.OVSBaseConnectionTester.set_tag(
            sub_port.patch_port_int_name, self.tester.bridge, vlan_net2)
        self.tester.wait_for_sub_port_connectivity(self.tester.INGRESS)
        self.tester.wait_for_sub_port_connectivity(self.tester.EGRESS)
        # Removing the sub port must cut sub-port traffic both ways.
        self.trunk_manager.remove_sub_port(sub_port.trunk_id,
                                           sub_port.port_id)
        self.tester.wait_for_sub_port_no_connectivity(self.tester.INGRESS)
        self.tester.wait_for_sub_port_no_connectivity(self.tester.EGRESS)
        # Removing the trunk must cut parent-port traffic too.
        self.trunk_manager.remove_trunk(self.trunk.trunk_id,
                                        self.trunk.port_id)
        self.tester.wait_for_no_connection(self.tester.INGRESS)
class TrunkManagerDisposeTrunkTestCase(base.BaseSudoTestCase):
    """Exercises TrunkManager.dispose_trunk() against real OVS bridges."""

    def setUp(self):
        super(TrunkManagerDisposeTrunkTestCase, self).setUp()
        trunk_id = uuidutils.generate_uuid()
        self.trunk = trunk_manager.TrunkParentPort(
            trunk_id, uuidutils.generate_uuid())
        # Replace the trunk's auto-created bridge with a fixture-managed
        # bridge of the same name so it is torn down automatically.
        self.trunk.bridge = self.useFixture(
            net_helpers.OVSTrunkBridgeFixture(
                self.trunk.bridge.br_name)).bridge
        self.br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        self.trunk_manager = trunk_manager.TrunkManager(
            self.br_int)

    def test_dispose_trunk(self):
        """Disposing deletes the trunk bridge and its int-side patch port."""
        self.trunk.plug(self.br_int)
        self.trunk_manager.dispose_trunk(self.trunk.bridge)
        self.assertFalse(
            self.trunk.bridge.bridge_exists(self.trunk.bridge.br_name))
        self.assertNotIn(self.trunk.patch_port_int_name,
                         self.br_int.get_port_name_list())
| {
"repo_name": "mahak/neutron",
"path": "neutron/tests/functional/services/trunk/drivers/openvswitch/agent/test_trunk_manager.py",
"copies": "2",
"size": "10994",
"license": "apache-2.0",
"hash": 6042485247185596000,
"line_mean": 43.1526104418,
"line_max": 79,
"alpha_frac": 0.6213389121,
"autogenerated": false,
"ratio": 3.6769230769230767,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5298261989023076,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import requests
import time
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from tacker.vnfm.monitor_drivers import abstract_driver
from tacker.vnfm.monitor_drivers.zabbix import zabbix_api as zapi
# Module-level logger for this monitor driver.
LOG = logging.getLogger(__name__)
class VNFMonitorZabbix(abstract_driver.VNFMonitorAbstractDriver):
params = ['application', 'OS']
def __init__(self):
self.kwargs = None
self.vnf = None
self.vduname = []
self.URL = None
self.hostinfo = {}
self.tenant_id = None
def get_type(self):
"""Return one of predefined type of the hosting vnf drivers."""
plugin_type = 'zabbix'
return plugin_type
def get_name(self):
"""Return a symbolic name for the VNF Monitor plugin."""
plugin_name = 'zabbix'
return plugin_name
def get_description(self):
"""Return description of VNF Monitor plugin."""
plugin_descript = 'Tacker VNFMonitor Zabbix Driver'
return plugin_descript
def monitor_get_config(self, plugin, context, vnf):
"""Return dict of monitor configuration data.
:param plugin:
:param context:
:param vnf:
:returns: dict
:returns: dict of monitor configuration data
"""
return {}
def monitor_url(self, plugin, context, vnf):
"""Return the url of vnf to monitor.
:param plugin:
:param context:
:param vnf:
:returns: string
:returns: url of vnf to monitor
"""
pass
def send_post(self, query):
response = requests.post(self.URL, headers=zapi.HEADERS,
data=jsonutils.dump_as_bytes(query))
return dict(response.json())
@staticmethod
def check_error(response):
try:
if 'result' not in response:
raise ValueError
except ValueError:
LOG.error('Cannot request error : %s', response['error']['data'])
def create_graph(self, itemid, name, nodename):
temp_graph_api = copy.deepcopy(zapi.dGRAPH_CREATE_API)
gitems = [{'itemid': itemid, 'color': '00AA00'}]
temp_graph_api['auth'] = \
self.hostinfo[nodename]['zbx_info']['zabbix_token']
temp_graph_api['params']['gitems'] = gitems
temp_graph_api['params']['name'] = name
response = self.send_post(temp_graph_api)
VNFMonitorZabbix.check_error(response)
def create_action(self):
for vdu in self.vduname:
temp_action_api = copy.deepcopy(zapi.dACTION_CREATE_API)
temp_action_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
tempname_api = temp_action_api['params']['operations'][0]
temp_filter = temp_action_api['params']['filter']
for info in (self.hostinfo[vdu]['actioninfo']):
tempname_api['opcommand_hst'][0]['hostid'] = \
self.hostinfo[vdu]['hostid']
now = time.localtime()
rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
temp_name = "Trigger Action " + \
str(
vdu + rtime + " " +
info['item'] + " " + info['action']
)
temp_action_api['params']['name'] = temp_name
if (info['action'] == 'cmd') and \
(info['item'] != 'os_agent_info'):
tempname_api['opcommand']['command'] = info['cmd-action']
elif (info['item'] == 'os_agent_info') \
and (info['action'] == 'cmd'):
tempname_api['opcommand']['authtype'] = 0
tempname_api['opcommand']['username'] = \
self.hostinfo[vdu]['appinfo']['ssh_username']
tempname_api['opcommand']['password'] = \
self.hostinfo[vdu]['appinfo']['ssh_password']
tempname_api['opcommand']['type'] = 2
tempname_api['opcommand']['command'] = info['cmd-action']
tempname_api['opcommand']['port'] = 22
temp_filter['conditions'][0]['value'] = info['trigger_id']
response = self.send_post(temp_action_api)
VNFMonitorZabbix.check_error(response)
continue
temp_filter['conditions'][0]['value'] = info['trigger_id']
response = self.send_post(temp_action_api)
VNFMonitorZabbix.check_error(response)
def create_vdu_host(self):
for vdu in self.vduname:
temp_host_api = zapi.dHOST_CREATE_API
temp_group_api = zapi.dGROUP_GET_API
temp_host_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
temp_group_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
response = self.send_post(temp_group_api)
gid = response['result'][0]['groupid']
temp_host_api['params']['host'] = str(vdu)
if type(self.hostinfo[vdu]['mgmt_ip']) is list:
for vduip in (self.hostinfo[vdu]['mgmt_ip']):
temp_host_api['params']['interfaces'][0]['ip'] = vduip
temp_host_api['params']['templates'][0]['templateid'] = \
self.hostinfo[vdu]['template_id'][0]
temp_host_api['params']['groups'][0]['groupid'] = gid
response = self.send_post(temp_host_api)
else:
temp_host_api['params']['interfaces'][0]['ip'] = \
self.hostinfo[vdu]['mgmt_ip']
temp_host_api['params']['templates'][0]['templateid'] = \
self.hostinfo[vdu]['template_id'][0]
temp_host_api['params']['groups'][0]['groupid'] = gid
response = self.send_post(temp_host_api)
if 'error' in response:
now = time.localtime()
rtime = str(now.tm_hour) + str(now.tm_min) + str(now.tm_sec)
temp_host_api['params']['host'] = str(vdu) + rtime
response = self.send_post(temp_host_api)
self.hostinfo[vdu]['hostid'] = response['result']['hostids'][0]
def create_trigger(self, trigger_params, vduname):
temp_trigger_api = copy.deepcopy(zapi.dTRIGGER_CREATE_API)
temp_trigger_api['auth'] = \
self.hostinfo[vduname]['zbx_info']['zabbix_token']
temp_trigger_api['params'] = trigger_params
temp_trigger_api['templateid'] = \
str(
self.hostinfo[vduname]['template_id'][0])
response = self.send_post(temp_trigger_api)
VNFMonitorZabbix.check_error(response)
return response['result']
def _create_trigger(self):
trigger_params = []
trig_act_pa = []
for vdu in self.vduname:
temp_trigger_list = copy.deepcopy(zapi.dTRIGGER_LIST)
temp_vdu_name = self.hostinfo[vdu]['appinfo']['app_name']
temp_vdu_port = self.hostinfo[vdu]['appinfo']['app_port']
for para in VNFMonitorZabbix.params:
for item in self.hostinfo[vdu]['parameters'][para]:
action_list = copy.deepcopy(zapi.dACTION_LIST)
temp_item = self.hostinfo[vdu]['parameters'][para][item]
if ('app_name' != item)\
and ('app_port' != item) \
and ('ssh_username' != item) \
and ('ssh_password' != item):
if 'condition' \
in temp_item:
temp_con = temp_item['condition']
if len(temp_con) == 2:
temp_comparrision = temp_con[0]
temp_comparrision_value = temp_con[1]
temp_trigger_list[item][0]['expression'] += \
self.hostinfo[vdu]['template_name'] + ':'\
+ str(
zapi.dITEM_KEY_COMP[item].replace(
'*', str(temp_vdu_name))) \
+ str(
zapi.COMP_VALUE[temp_comparrision]) \
+ str(
temp_comparrision_value)
else:
temp_comparrision = temp_con[0]
if 'os_agent_info' == item:
temp_trigger_list[item][0]['expression'] += \
self.hostinfo[vdu]['template_name'] + ':' \
+ str(zapi.dITEM_KEY_COMP[item])
else:
temp_trigger_list[item][0]['expression'] += \
self.hostinfo[vdu]['template_name'] + ':' \
+ str(
zapi.dITEM_KEY_COMP[item].replace(
'*', str(temp_vdu_port))) \
+ str(
zapi.COMP_VALUE[temp_comparrision])
if 'actionname' in temp_item:
trig_act_pa.append(temp_trigger_list[item][0])
response = self.create_trigger(trig_act_pa, vdu)
del trig_act_pa[:]
action_list['action'] = \
temp_item['actionname']
action_list['trigger_id'] = \
response['triggerids'][0]
action_list['item'] = item
if 'cmd' == \
temp_item['actionname']:
action_list['cmd-action'] = \
temp_item['cmd-action']
self.hostinfo[vdu]['actioninfo'].append(
action_list)
else:
trigger_params.append(
temp_trigger_list[item][0])
if len(trigger_params) != 0:
self.create_trigger(trigger_params, vdu)
del trigger_params[:]
def create_item(self):
# Create _ITEM
for vdu in self.vduname:
temp_item_api = copy.deepcopy(zapi.dITEM_CREATE_API)
temp_item_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
self.hostinfo[vdu]['appinfo'] = \
copy.deepcopy(zapi.dAPP_INFO)
temp_app = self.hostinfo[vdu]['parameters']['application']
temp_item_api['params']['hostid'] = \
self.hostinfo[vdu]['template_id'][0]
for para in VNFMonitorZabbix.params:
if 'application' == para:
for app_info in temp_app:
self.hostinfo[vdu]['appinfo'][app_info] = \
temp_app[app_info]
for item in self.hostinfo[vdu]['parameters'][para]:
if ('app_name' != item) and ('app_port' != item) \
and ('ssh_username' != item) \
and ('ssh_password' != item):
temp_item_api['params']['name'] = \
zapi.dITEM_KEY_INFO[item]['name']
temp_item_api['params']['value_type'] = \
zapi.dITEM_KEY_INFO[item]['value_type']
if item == 'app_status':
temp = zapi.dITEM_KEY_INFO[item]['key_']
temp_item_api['params']['key_'] = temp.replace(
'*', str(
self.hostinfo[vdu]['appinfo']['app_port']))
elif item == 'app_memory':
temp = zapi.dITEM_KEY_INFO[item]['key_']
temp_item_api['params']['key_'] = temp.replace(
'*',
str(
self.hostinfo[vdu]['appinfo']['app_name']))
else:
temp_item_api['params']['key_'] = \
zapi.dITEM_KEY_INFO[item]['key_']
response = self.send_post(temp_item_api)
self.create_graph(
response['result']['itemids'][0],
temp_item_api['params']['name'], vdu)
VNFMonitorZabbix.check_error(response)
def create_template(self):
temp_template_api = copy.deepcopy(zapi.dTEMPLATE_CREATE_API)
for vdu in self.vduname:
temp_template_api['params']['host'] = "Tacker Template " + str(vdu)
temp_template_api['auth'] = \
self.hostinfo[vdu]['zbx_info']['zabbix_token']
response = self.send_post(temp_template_api)
if 'error' in response:
if "already exists." in response['error']['data']:
now = time.localtime()
rtime = str(now.tm_hour) + str(now.tm_min) + str(
now.tm_sec)
temp_template_api['params']['host'] = \
"Tacker Template " + str(vdu) + rtime
response = self.send_post(temp_template_api)
VNFMonitorZabbix.check_error(response)
self.hostinfo[vdu]['template_id'] = \
response['result']['templateids']
self.hostinfo[vdu]['template_name'] =\
temp_template_api['params']['host']
def add_host_to_zabbix(self):
self.create_template()
self.create_item()
self._create_trigger()
self.create_vdu_host()
self.create_action()
def get_token_from_zbxserver(self, node):
temp_auth_api = copy.deepcopy(zapi.dAUTH_API)
temp_auth_api['params']['user'] = \
self.hostinfo[node]['zbx_info']['zabbix_user']
temp_auth_api['params']['password'] = \
self.hostinfo[node]['zbx_info']['zabbix_pass']
zabbixip = \
self.hostinfo[node]['zbx_info']['zabbix_ip']
zabbixport = \
self.hostinfo[node]['zbx_info']['zabbix_port']
self.URL = "http://" + zabbixip + ":" + \
str(zabbixport) + zapi.URL
if netaddr.valid_ipv6(zabbixip):
self.URL = "http://[" + zabbixip + "]:" + \
str(zabbixport) + zapi.URL
response = requests.post(
self.URL,
headers=zapi.HEADERS,
data=jsonutils.dump_as_bytes(temp_auth_api)
)
response_dict = dict(response.json())
VNFMonitorZabbix.check_error(response_dict)
LOG.info('Success Connect Zabbix Server')
return response_dict['result']
def set_zbx_info(self, node):
self.hostinfo[node]['zbx_info'] = \
copy.deepcopy(zapi.dZBX_INFO)
self.hostinfo[node]['zbx_info']['zabbix_user'] = \
self.kwargs['vdus'][node]['zabbix_username']
self.hostinfo[node]['zbx_info']['zabbix_pass'] = \
self.kwargs['vdus'][node]['zabbix_password']
self.hostinfo[node]['zbx_info']['zabbix_ip'] = \
self.kwargs['vdus'][node]['zabbix_server_ip']
self.hostinfo[node]['zbx_info']['zabbix_port'] = \
self.kwargs['vdus'][node]['zabbix_server_port']
self.hostinfo[node]['zbx_info']['zabbix_token'] = \
self.get_token_from_zbxserver(node)
def set_vdu_info(self):
temp_vduname = self.kwargs['vdus'].keys()
for node in temp_vduname:
if 'application' in \
self.kwargs['vdus'][node]['parameters'] \
and 'OS'\
in self.kwargs['vdus'][node]['parameters']:
self.vduname.append(node)
self.hostinfo[node] = copy.deepcopy(zapi.dVDU_INFO)
self.set_zbx_info(node)
self.hostinfo[node]['mgmt_ip'] = \
self.kwargs['vdus'][node]['mgmt_ip']
self.hostinfo[node]['parameters'] = \
self.kwargs['vdus'][node]['parameters']
self.hostinfo[node]['vdu_id'] = self.vnf['id']
def add_to_appmonitor(self, vnf, kwargs):
self.__init__()
self.kwargs = kwargs
self.vnf = vnf
self.set_vdu_info()
self.tenant_id = self.vnf['vnfd']['tenant_id']
self.add_host_to_zabbix()
    def monitor_call(self, vnf, kwargs):
        # Part of the monitor-driver interface; intentionally a no-op for
        # this driver.
        pass
    def monitor_app_driver(self, plugin, context, vnf, service_instance):
        # Identify this application-monitoring driver by its registered name.
        return self.get_name()
| {
"repo_name": "stackforge/tacker",
"path": "tacker/vnfm/monitor_drivers/zabbix/zabbix.py",
"copies": "2",
"size": "17844",
"license": "apache-2.0",
"hash": 5478007716620718000,
"line_mean": 41.8942307692,
"line_max": 79,
"alpha_frac": 0.4844205335,
"autogenerated": false,
"ratio": 4.108680635505411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 416
} |
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
import re
from email.utils import parseaddr
import trac
from trac.core import *
from trac.config import ListOption, Option
import announcer
from announcer.distributors.mail import IAnnouncementEmailDecorator
from announcer.util.mail import set_header, msgid, next_decorator, uid_encode
class ThreadingEmailDecorator(Component):
    """Attach Message-ID, In-Reply-To and References headers to resource
    announcements.

    Message ids are derived deterministically from the resource, so the
    same resource always yields the same thread id and mail clients can
    group follow-up announcements under the original message.
    """

    implements(IAnnouncementEmailDecorator)

    supported_realms = ListOption('announcer', 'email_threaded_realms',
        ['ticket', 'wiki'],
        """These are realms with announcements that should be threaded
        emails. In order for email threads to work, the announcer
        system needs to give the email recreatable Message-IDs based
        on the resources in the realm. The resources must have a unique
        and immutable id, name or str() representation in it's realm
        """)

    def decorate_message(self, event, message, decorates=None):
        """Set threading headers for events in the configured realms,
        then hand off to the next decorator in the chain.
        """
        if event.realm not in self.supported_realms:
            return next_decorator(event, message, decorates)
        uid = uid_encode(self.env.abs_href(), event.realm, event.target)
        email_from = self.config.get('announcer', 'email_from', 'localhost')
        # Derive the message-id host part from the configured From address.
        host = re.sub('^.+@', '', parseaddr(email_from)[1])
        this_msgid = msgid(uid, host)
        if event.category == 'created':
            # First announcement for the resource starts the thread.
            set_header(message, 'Message-ID', this_msgid)
        else:
            # Follow-ups reference the original message to join the thread.
            set_header(message, 'In-Reply-To', this_msgid)
            set_header(message, 'References', this_msgid)
        return next_decorator(event, message, decorates)
class StaticEmailDecorator(Component):
    """The static ticket decorator implements a policy to -always- send an
    email to a certain address.

    Controlled via the always_cc and always_bcc option in the announcer
    section of the trac.ini. If no subscribers are found, then even if
    always_cc and always_bcc addresses are specified, no announcement will be
    sent. Since these fields are added after announcers subscription system,
    filters such as never_announce and never_notify author won't work with
    these addresses.

    These settings are considered dangerous if you are using the verify email
    or reset password features of the accountmanager plugin.
    """

    implements(IAnnouncementEmailDecorator)

    always_cc = Option("announcer", "email_always_cc", None,
        """Email addresses specified here will always
        be cc'd on all announcements. This setting is dangerous if
        accountmanager is present.
        """)

    always_bcc = Option("announcer", "email_always_bcc", None,
        """Email addresses specified here will always
        be bcc'd on all announcements. This setting is dangerous if
        accountmanager is present.
        """)

    def decorate_message(self, event, message, decorates=None):
        """Append the configured static Cc/Bcc addresses to the message.

        Bug fix: the original called ``", ".join(str(message[k]), v)`` —
        ``str.join`` takes a single iterable, so merging with an existing
        header raised TypeError.  Also removed a stray trailing comma
        (tuple expression) after the debug call and an always-true
        ``len(...split(',')) > 0`` conjunct (str.split never returns an
        empty list).
        """
        for k, v in {'Cc': self.always_cc, 'Bcc': self.always_bcc}.items():
            if v:
                self.log.debug("StaticEmailDecorator added '%s' "
                        "because of rule: email_always_%s" % (v, k.lower()))
                if message[k]:
                    # Merge with recipients a previous decorator already set.
                    recips = ", ".join([str(message[k]), v])
                else:
                    recips = v
                set_header(message, k, recips)
        return next_decorator(event, message, decorates)
class AnnouncerEmailDecorator(Component):
    """Add standard informational headers every announcement should carry."""

    implements(IAnnouncementEmailDecorator)

    def decorate_message(self, event, message, decorators):
        mailer = 'AnnouncerPlugin v%s on Trac v%s' % (
            announcer.__version__,
            trac.__version__
        )
        # Fixed housekeeping headers, applied in a stable order.
        for name, value in (
            ('Auto-Submitted', 'auto-generated'),
            ('Precedence', 'bulk'),
            ('X-Announcer-Version', announcer.__version__),
            ('X-Mailer', mailer),
            ('X-Trac-Announcement-Realm', event.realm),
            ('X-Trac-Project', self.env.project_name),
            ('X-Trac-Version', trac.__version__),
        ):
            set_header(message, name, value)
        return next_decorator(event, message, decorators)
| {
"repo_name": "dokipen/trac-announcer-plugin",
"path": "announcer/email_decorators/generic.py",
"copies": "1",
"size": "6206",
"license": "bsd-3-clause",
"hash": 5889891073603105000,
"line_mean": 43.9710144928,
"line_max": 80,
"alpha_frac": 0.6720915243,
"autogenerated": false,
"ratio": 4.33986013986014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008452434192663219,
"num_lines": 138
} |
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from time import sleep, time
import ctypes as ct
from ctypes.util import find_library
# linux only!
from PyQt4.QtCore import QThread, SIGNAL
# X11 keymap polling is Linux-only.
assert("linux" in sys.platform)
x11 = ct.cdll.LoadLibrary(find_library("X11"))
display = x11.XOpenDisplay(None)
# this will hold the keyboard state. 32 bytes, with each
# bit representing the state for a single key.
keyboard = (ct.c_char * 32)()
# these are the locations (byte, byte value) of special
# keys to watch
shift_keys = ((6,4), (7,64))
modifiers = {
    "left shift": (6,4),
    "right shift": (7,64),
    "left ctrl": (4,32),
    "right ctrl": (13,2),
    "left alt": (8,1),
    "right alt": (13,16)
}
# Mutable module-level state used by fetch_keys() to detect edges
# (press/release transitions) between successive polls.
last_pressed = set()
last_pressed_adjusted = set()
last_modifier_state = {}
caps_lock_state = 0
# key is byte number, value is a dictionary whose
# keys are values for that byte, and values are the
# keys corresponding to those byte values
# NOTE(review): a 2-tuple value is (unshifted, shifted) characters; a plain
# string is a key with no shifted variant. Mapping appears to assume a US
# QWERTY layout — TODO confirm.
key_mapping = {
    1: {
        0b00000010: "<esc>",
        0b00000100: ("1", "!"),
        0b00001000: ("2", "@"),
        0b00010000: ("3", "#"),
        0b00100000: ("4", "$"),
        0b01000000: ("5", "%"),
        0b10000000: ("6", "^"),
    },
    2: {
        0b00000001: ("7", "&"),
        0b00000010: ("8", "*"),
        0b00000100: ("9", "("),
        0b00001000: ("0", ")"),
        0b00010000: ("-", "_"),
        0b00100000: ("=", "+"),
        0b01000000: "<backspace>",
        0b10000000: "<tab>",
    },
    3: {
        0b00000001: ("q", "Q"),
        0b00000010: ("w", "W"),
        0b00000100: ("e", "E"),
        0b00001000: ("r", "R"),
        0b00010000: ("t", "T"),
        0b00100000: ("y", "Y"),
        0b01000000: ("u", "U"),
        0b10000000: ("i", "I"),
    },
    4: {
        0b00000001: ("o", "O"),
        0b00000010: ("p", "P"),
        0b00000100: ("[", "{"),
        0b00001000: ("]", "}"),
        0b00010000: "<enter>",
        #0b00100000: "<left ctrl>",
        0b01000000: ("a", "A"),
        0b10000000: ("s", "S"),
    },
    5: {
        0b00000001: ("d", "D"),
        0b00000010: ("f", "F"),
        0b00000100: ("g", "G"),
        0b00001000: ("h", "H"),
        0b00010000: ("j", "J"),
        0b00100000: ("k", "K"),
        0b01000000: ("l", "L"),
        0b10000000: (";", ":"),
    },
    6: {
        0b00000001: ("'", "\""),
        0b00000010: ("`", "~"),
        #0b00000100: "<left shift>",
        0b00001000: ("\\", "|"),
        0b00010000: ("z", "Z"),
        0b00100000: ("x", "X"),
        0b01000000: ("c", "C"),
        0b10000000: ("v", "V"),
    },
    7: {
        0b00000001: ("b", "B"),
        0b00000010: ("n", "N"),
        0b00000100: ("m", "M"),
        0b00001000: (",", "<"),
        0b00010000: (".", ">"),
        0b00100000: ("/", "?"),
        #0b01000000: "<right shift>",
    },
    8: {
        #0b00000001: "<left alt>",
        0b00000010: " ",
        0b00000100: "<caps lock>",
    },
    13: {
        #0b00000010: "<right ctrl>",
        #0b00010000: "<right alt>",
    },
}
def fetch_keys_raw():
    """Poll the X server and return the raw 32-byte keymap bitfield.

    Mutates and returns the module-level ``keyboard`` buffer in place.
    """
    x11.XQueryKeymap(display, keyboard)
    return keyboard
def fetch_keys():
    """Decode the current keymap into (state_changed, modifier_state, key).

    Returns a 3-tuple: whether the key/modifier state changed since the
    previous call, a dict of modifier-name -> bool, and at most one newly
    pressed key (or None). Relies on the module-level state globals below
    to detect edges between successive polls.
    """
    global caps_lock_state, last_pressed, last_pressed_adjusted, last_modifier_state
    keypresses_raw = fetch_keys_raw()
    # check modifier states (ctrl, alt, shift keys)
    modifier_state = {}
    for mod, (i, byte) in modifiers.iteritems():  # Python 2 dict API
        modifier_state[mod] = bool(ord(keypresses_raw[i]) & byte)
    # shift pressed?
    shift = 0
    for i, byte in shift_keys:
        if ord(keypresses_raw[i]) & byte:
            shift = 1
            break
    # caps lock state
    # NOTE(review): toggles on every poll while the key is held down, not
    # on press edges — TODO confirm intended.
    if ord(keypresses_raw[8]) & 4: caps_lock_state = int(not caps_lock_state)
    # aggregate the pressed keys
    pressed = []
    for i, k in enumerate(keypresses_raw):
        o = ord(k)
        if o:
            for byte,key in key_mapping.get(i, {}).iteritems():
                if byte & o:
                    # Tuple entries are (unshifted, shifted) variants.
                    if isinstance(key, tuple): key = key[shift or caps_lock_state]
                    pressed.append(key)
    # Keep only keys that were not already down on the previous poll.
    tmp = pressed
    pressed = list(set(pressed).difference(last_pressed))
    state_changed = tmp != last_pressed and (pressed or last_pressed_adjusted)
    last_pressed = tmp
    last_pressed_adjusted = pressed
    # Report at most one newly pressed key.
    if pressed: pressed = pressed[0]
    else: pressed = None
    state_changed = last_modifier_state and (state_changed or modifier_state != last_modifier_state)
    last_modifier_state = modifier_state
    return state_changed, modifier_state, pressed
class KeyLoggerThread(QThread):
    """Background Qt thread that polls the X11 keymap and emits a signal
    when the backtick key is pressed."""
    def __init__(self, parent = None):
        QThread.__init__(self, parent)
        # Flag for cooperative shutdown; NOTE(review): never checked by
        # log(), so the thread currently loops forever.
        self.exiting = False
        # self.start('|');
    def run(self):
        print('run in keyLogger()')
        # NOTE(review): 'now' and 'done' are computed but never used.
        now = time()
        done = lambda: time() > now + 60
        # def print_keys(t, modifiers, keys):
        #     if keys =='|':
        #         print('YES!')
        #     print "%.2f %r %r" % (t, keys, modifiers)
        self.log()
    def log(self, sleep_interval=.005):
        """Poll the keyboard every *sleep_interval* seconds forever and emit
        GLOBAL_VARIABLE.KEY_PRESSED on a '`' keypress."""
        from Main import GLOBAL_VARIABLE
        while True:
            sleep(sleep_interval)
            changed, modifiers, keys = fetch_keys()
            if changed and keys == '`':
                self.emit(SIGNAL(GLOBAL_VARIABLE.KEY_PRESSED))
| {
"repo_name": "mohammadkrb/Fittak",
"path": "KeyLogger.py",
"copies": "1",
"size": "6753",
"license": "mit",
"hash": 2092507869730315000,
"line_mean": 27.982832618,
"line_max": 100,
"alpha_frac": 0.5690804087,
"autogenerated": false,
"ratio": 3.533751962323391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9572758910832042,
"avg_score": 0.006014692038269777,
"num_lines": 233
} |
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
@dataclass
class CtcCriterionConfig(FairseqDataclass):
    """Configuration for the CTC loss criterion."""
    zero_infinity: bool = field(
        default=False,
        metadata={"help": "zero inf loss when source length <= target length"},
    )
    # Resolved from the global optimization.sentence_avg setting via
    # OmegaConf interpolation (II).
    sentence_avg: bool = II("optimization.sentence_avg")
    post_process: str = field(
        default="letter",
        metadata={
            "help": "how to post process predictions into words. can be letter, "
            "wordpiece, BPE symbols, etc. "
            "See fairseq.data.data_utils.post_process() for full list of options"
        },
    )
    wer_kenlm_model: Optional[str] = field(
        default=None,
        metadata={
            "help": "if this is provided, use kenlm to compute wer (along with other wer_* args)"
        },
    )
    wer_lexicon: Optional[str] = field(
        default=None,
        metadata={"help": "lexicon to use with wer_kenlm_model"},
    )
    wer_lm_weight: float = field(
        default=2.0,
        metadata={"help": "lm weight to use with wer_kenlm_model"},
    )
    wer_word_score: float = field(
        default=-1.0,
        metadata={"help": "lm word score to use with wer_kenlm_model"},
    )
    # Legacy combined form; unpacked into the four wer_* fields in
    # CtcCriterion.__init__.
    wer_args: Optional[str] = field(
        default=None,
        metadata={
            "help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)"
        },
    )
@register_criterion("ctc", dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
    """CTC loss criterion with optional KenLM-decoded WER reporting at
    validation time."""
    def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
        super().__init__(task)
        # Index of the CTC blank symbol; falls back to 0 if the task does
        # not declare one.
        self.blank_idx = (
            task.target_dictionary.index(task.blank_symbol)
            if hasattr(task, "blank_symbol")
            else 0
        )
        self.pad_idx = task.target_dictionary.pad()
        self.eos_idx = task.target_dictionary.eos()
        self.post_process = cfg.post_process
        if cfg.wer_args is not None:
            # Deprecated combined option: unpack into the four wer_* fields.
            # NOTE(review): eval() on a config-supplied string — unsafe if
            # the config is untrusted; kept for backward compatibility.
            (
                cfg.wer_kenlm_model,
                cfg.wer_lexicon,
                cfg.wer_lm_weight,
                cfg.wer_word_score,
            ) = eval(cfg.wer_args)
        if cfg.wer_kenlm_model is not None:
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
            # Build the decoder args namespace expected by W2lKenLMDecoder.
            dec_args = Namespace()
            dec_args.nbest = 1
            dec_args.criterion = "ctc"
            dec_args.kenlm_model = cfg.wer_kenlm_model
            dec_args.lexicon = cfg.wer_lexicon
            dec_args.beam = 50
            dec_args.beam_size_token = min(50, len(task.target_dictionary))
            dec_args.beam_threshold = min(50, len(task.target_dictionary))
            dec_args.lm_weight = cfg.wer_lm_weight
            dec_args.word_score = cfg.wer_word_score
            dec_args.unk_weight = -math.inf
            dec_args.sil_weight = 0
            self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
        else:
            self.w2l_decoder = None
        self.zero_infinity = cfg.zero_infinity
        self.sentence_avg = cfg.sentence_avg
    def forward(self, model, sample, reduce=True):
        """Compute the CTC loss for one batch.

        Returns (loss, sample_size, logging_output). During evaluation also
        accumulates character/word error counts into the logging output.
        """
        net_output = model(**sample["net_input"])
        lprobs = model.get_normalized_probs(
            net_output, log_probs=True
        ).contiguous()  # (T, B, C) from the encoder
        # Determine per-utterance input lengths, preferring explicit
        # src_lengths, then the padding mask, else the full time dimension.
        if "src_lengths" in sample["net_input"]:
            input_lengths = sample["net_input"]["src_lengths"]
        else:
            if net_output["padding_mask"] is not None:
                non_padding_mask = ~net_output["padding_mask"]
                input_lengths = non_padding_mask.long().sum(-1)
            else:
                input_lengths = lprobs.new_full(
                    (lprobs.size(1),), lprobs.size(0), dtype=torch.long
                )
        # Strip pad and eos tokens from the targets for F.ctc_loss.
        pad_mask = (sample["target"] != self.pad_idx) & (
            sample["target"] != self.eos_idx
        )
        targets_flat = sample["target"].masked_select(pad_mask)
        if "target_lengths" in sample:
            target_lengths = sample["target_lengths"]
        else:
            target_lengths = pad_mask.sum(-1)
        # cuDNN CTC is disabled here; the native implementation is used.
        with torch.backends.cudnn.flags(enabled=False):
            loss = F.ctc_loss(
                lprobs,
                targets_flat,
                input_lengths,
                target_lengths,
                blank=self.blank_idx,
                reduction="sum",
                zero_infinity=self.zero_infinity,
            )
        ntokens = (
            sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item()
        )
        sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
        logging_output = {
            "loss": utils.item(loss.data),  # * sample['ntokens'],
            "ntokens": ntokens,
            "nsentences": sample["id"].numel(),
            "sample_size": sample_size,
        }
        if not model.training:
            # Evaluation-only: compute character and word error statistics.
            import editdistance
            with torch.no_grad():
                lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
                c_err = 0
                c_len = 0
                w_errs = 0
                w_len = 0
                wv_errs = 0
                for lp, t, inp_l in zip(
                    lprobs_t,
                    sample["target_label"]
                    if "target_label" in sample
                    else sample["target"],
                    input_lengths,
                ):
                    lp = lp[:inp_l].unsqueeze(0)
                    # Optionally decode with the KenLM beam-search decoder;
                    # take the best hypothesis of the best utterance.
                    decoded = None
                    if self.w2l_decoder is not None:
                        decoded = self.w2l_decoder.decode(lp)
                        if len(decoded) < 1:
                            decoded = None
                        else:
                            decoded = decoded[0]
                            if len(decoded) < 1:
                                decoded = None
                            else:
                                decoded = decoded[0]
                    p = (t != self.task.target_dictionary.pad()) & (
                        t != self.task.target_dictionary.eos()
                    )
                    targ = t[p]
                    targ_units = self.task.target_dictionary.string(targ)
                    targ_units_arr = targ.tolist()
                    # Greedy CTC collapse: argmax, merge repeats, drop blanks.
                    toks = lp.argmax(dim=-1).unique_consecutive()
                    pred_units_arr = toks[toks != self.blank_idx].tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    targ_words = post_process(targ_units, self.post_process).split()
                    pred_units = self.task.target_dictionary.string(pred_units_arr)
                    pred_words_raw = post_process(pred_units, self.post_process).split()
                    if decoded is not None and "words" in decoded:
                        pred_words = decoded["words"]
                        w_errs += editdistance.eval(pred_words, targ_words)
                        wv_errs += editdistance.eval(pred_words_raw, targ_words)
                    else:
                        dist = editdistance.eval(pred_words_raw, targ_words)
                        w_errs += dist
                        wv_errs += dist
                    w_len += len(targ_words)
                logging_output["wv_errors"] = wv_errs
                logging_output["w_errors"] = w_errs
                logging_output["w_total"] = w_len
                logging_output["c_errors"] = c_err
                logging_output["c_total"] = c_len
        return loss, sample_size, logging_output
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
        ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
        nsentences = utils.item(
            sum(log.get("nsentences", 0) for log in logging_outputs)
        )
        sample_size = utils.item(
            sum(log.get("sample_size", 0) for log in logging_outputs)
        )
        # Loss is reported in base-2 (bits) per sample_size unit.
        metrics.log_scalar(
            "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
        )
        metrics.log_scalar("ntokens", ntokens)
        metrics.log_scalar("nsentences", nsentences)
        if sample_size != ntokens:
            metrics.log_scalar(
                "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
            )
        # Underscore-prefixed scalars feed the derived uer/wer metrics below.
        c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
        metrics.log_scalar("_c_errors", c_errors)
        c_total = sum(log.get("c_total", 0) for log in logging_outputs)
        metrics.log_scalar("_c_total", c_total)
        w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
        metrics.log_scalar("_w_errors", w_errors)
        wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
        metrics.log_scalar("_wv_errors", wv_errors)
        w_total = sum(log.get("w_total", 0) for log in logging_outputs)
        metrics.log_scalar("_w_total", w_total)
        if c_total > 0:
            metrics.log_derived(
                "uer",
                lambda meters: safe_round(
                    meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
                )
                if meters["_c_total"].sum > 0
                else float("nan"),
            )
        if w_total > 0:
            metrics.log_derived(
                "wer",
                lambda meters: safe_round(
                    meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
                )
                if meters["_w_total"].sum > 0
                else float("nan"),
            )
            metrics.log_derived(
                "raw_wer",
                lambda meters: safe_round(
                    meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
                )
                if meters["_w_total"].sum > 0
                else float("nan"),
            )
    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| {
"repo_name": "pytorch/fairseq",
"path": "fairseq/criterions/ctc.py",
"copies": "1",
"size": "11065",
"license": "mit",
"hash": -1170510497513276200,
"line_mean": 36.5084745763,
"line_max": 104,
"alpha_frac": 0.5282422052,
"autogenerated": false,
"ratio": 3.9560243117626026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9981484583661173,
"avg_score": 0.0005563866602857781,
"num_lines": 295
} |
# All rights reserved
# Work extracted from webob.org website
# http://docs.webob.org/en/latest/do-it-yourself.html
import re
import sys
from webob import Request, Response
from webob import exc
import eventlet
from eventlet import wsgi
# \{ (\w+)(?::([^}]+))?\}
var_regex = re.compile(r'''
\{ # The exact character "{"
(\w+) # The variable name (restricted to a-z, 0-9, _)
(?::([^}]+))? # The optional :regex part
\} # The exact character "}"
''', re.VERBOSE)
def template_to_regex(template):
    """Compile a URL template into a regex pattern string.

    Each ``{name}`` placeholder becomes a named group ``(?P<name>[^/]+)``;
    ``{name:regex}`` uses the supplied regex instead. Literal text is
    escaped, and the result is anchored at both ends.
    """
    pieces = []
    pos = 0
    for placeholder in var_regex.finditer(template):
        # Literal text between placeholders is matched verbatim.
        pieces.append(re.escape(template[pos:placeholder.start()]))
        name = placeholder.group(1)
        expr = placeholder.group(2) or '[^/]+'
        pieces.append('(?P<%s>%s)' % (name, expr))
        pos = placeholder.end()
    pieces.append(re.escape(template[pos:]))
    return '^%s$' % ''.join(pieces)
def load_controller(string):
    """Resolve a ``'module:callable'`` spec to the callable object.

    Imports the module (dotted paths supported) and returns the named
    attribute from it.
    """
    module_name, func_name = string.split(':', 1)
    __import__(module_name)
    # __import__ returns the top-level package; sys.modules has the leaf.
    return getattr(sys.modules[module_name], func_name)
class Router(object):
    """Minimal WSGI router mapping URL templates to controller apps."""

    def __init__(self):
        # Each entry: (compiled template regex, controller app, extra urlvars).
        self.routes = []

    def add_route(self, template, controller, **vars):
        """Register *template*; *controller* may be a 'module:func' string."""
        if isinstance(controller, basestring):  # Python 2 string check
            controller = load_controller(controller)
        compiled = re.compile(template_to_regex(template))
        self.routes.append((compiled, controller, vars))

    def __call__(self, environ, start_response):
        req = Request(environ)
        for regex, controller, vars in self.routes:
            match = regex.match(req.path_info)
            if match is None:
                continue
            # Expose template variables (plus route extras) to the controller.
            req.urlvars = match.groupdict()
            req.urlvars.update(vars)
            return controller(environ, start_response)
        return exc.HTTPNotFound()(environ, start_response)
def controller(func):
    """Wrap a function taking (request, **urlvars) into a WSGI application.

    The wrapped function may return either a string body or a webob
    Response; raised webob HTTP exceptions are converted into responses.
    """
    def replacement(environ, start_response):
        req = Request(environ)
        try:
            resp = func(req, **req.urlvars)
        except exc.HTTPException, e:  # Python 2 except syntax
            resp = e
        if isinstance(resp, basestring):
            resp = Response(body=resp)
        return resp(environ, start_response)
    return replacement
def rest_controller(cls):
    """Wrap a class into a WSGI app that dispatches by HTTP method.

    The class is instantiated per request with (request, **urlvars); the
    handler method name is the lowercased HTTP method, optionally prefixed
    by the 'action' urlvar (e.g. ``myaction_get``). Missing handlers yield
    404; a string return value becomes a Response body.
    """
    def replacement(environ, start_response):
        req = Request(environ)
        try:
            instance = cls(req, **req.urlvars)
            action = req.urlvars.get('action')
            if action:
                action += '_' + req.method.lower()
            else:
                action = req.method.lower()
            try:
                method = getattr(instance, action)
            except AttributeError:
                raise exc.HTTPNotFound("No action %s" % action)
            resp = method()
            if isinstance(resp, basestring):
                resp = Response(body=resp)
        except exc.HTTPException, e:  # Python 2 except syntax
            resp = e
        return resp(environ, start_response)
    return replacement
if __name__ == '__main__':
    # Demo 1: function-based controller via the @controller decorator.
    #================================================================================
    @controller
    def hello(req):
        if req.method == 'POST':
            return 'Hello from POST %s!' % req.params['name']
        elif req.method == 'GET':
            return '''<form method="POST">
                You're name: <input type="text" name="name">
                <input type="submit">
                </form>'''
    hello_world = Router()
    hello_world.add_route('/', controller=hello)
    req = Request.blank('/')
    resp = req.get_response(hello_world)
    print (resp)
    req.method = 'POST'
    req.body = 'name=Houssem'
    resp = req.get_response(hello_world)
    print (resp)
    #================================================================================
    print ('================================================================================')
    # Demo 2: class-based controllers via rest_controller, dispatched by
    # HTTP method, including a route with template variables.
    #================================================================================
    class Hello(object):
        def __init__(self, req):
            self.request = req
        def post(self):
            return 'Hello from POST %s!' % self.request.params['name']
        def get(self):
            return 'Hello from GET'
        def put(self):
            return 'Hello from PUT %s!' % self.request.params['name']
        def delete(self):
            return 'Hello from DELETE'
    class Hello2(object):
        def __init__(self, req, a, b):
            self.a = a
            self.b = b
            self.request = req
        def post(self):
            print self.request.body
            return 'Hello2 from POST %s!' % self.request.params
        def get(self):
            return 'Hello2 from GET: 33333333 ' + str(self.a) + ' ' + str(self.b)
        def put(self):
            return 'Hello2 from PUT %s!' % self.request.params['name']
        def delete(self):
            return 'Hello2 from DELETE'
    hello = rest_controller(Hello)
    hello2 = rest_controller(Hello2)
    hello_world = Router()
    hello_world.add_route('/', controller=hello)
    hello_world.add_route('/{a}/action={b}', controller=hello2)
    req = Request.blank('/aValue/action=bValue')
    req.body = 'name=Houssem'
    resp = req.get_response(hello_world)
    print (resp)
    req.method = 'POST'
    resp = req.get_response(hello_world)
    print (resp)
    req.method = 'GET'
    resp = req.get_response(hello_world)
    print (resp)
    req.method = 'PUT'
    resp = req.get_response(hello_world)
    print (resp)
    req.method = 'DELETE'
    resp = req.get_response(hello_world)
    print (resp)
    # =======================================================================
    # Serve the router on port 8090 with eventlet's WSGI server (blocks).
    class ocni_server(object):
        """
        Represent the main occi REST server
        """
        def run_server(self):
            """
            to run the server
            """
            wsgi.server(eventlet.listen(('', 8090)), hello_world)
            pass
    ocni_server_instance = ocni_server()
    ocni_server_instance.run_server()
| {
"repo_name": "MarouenMechtri/CNG-Manager",
"path": "pyocni/pyocni_tools/DoItYourselfWebOb.py",
"copies": "3",
"size": "6121",
"license": "apache-2.0",
"hash": -8115954046624756000,
"line_mean": 27.4697674419,
"line_max": 94,
"alpha_frac": 0.5123345859,
"autogenerated": false,
"ratio": 4.175306957708049,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025665904107442885,
"num_lines": 215
} |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# API extension definition constants for ECMP routing (shim extension:
# signals behavior only, adds no attributes or resources).
ALIAS = 'ecmp_routes'
IS_SHIM_EXTENSION = True
IS_STANDARD_ATTR_EXTENSION = False
NAME = 'Equal Cost Multipath Routing'
API_PREFIX = ''
# Bug fix: the two string fragments concatenated without a separating
# space, producing "reach thesame destination".
DESCRIPTION = ("Allows traffic to reach the "
               "same destination via multiple different links ")
UPDATED_TIMESTAMP = "2020-11-26T18:00:00-00:00"
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCE_ATTRIBUTE_MAP = {}
ACTION_MAP = {}
ACTION_STATUS = {}
REQUIRED_EXTENSIONS = []
OPTIONAL_EXTENSIONS = []
| {
"repo_name": "openstack/neutron-lib",
"path": "neutron_lib/api/definitions/ecmp_routes.py",
"copies": "1",
"size": "1065",
"license": "apache-2.0",
"hash": 1641086717633579500,
"line_mean": 36.0357142857,
"line_max": 78,
"alpha_frac": 0.696713615,
"autogenerated": false,
"ratio": 3.5618729096989967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47585865246989967,
"avg_score": null,
"num_lines": null
} |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
# Translation domain for this project's message catalogs.
DOMAIN = "networking-fortinet"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
    """Return the languages with available translations for this domain."""
    return oslo_i18n.get_available_languages(DOMAIN)
| {
"repo_name": "samsu/networking-fortinet",
"path": "networking_fortinet/_i18n.py",
"copies": "1",
"size": "1409",
"license": "apache-2.0",
"hash": -8115641292244537000,
"line_mean": 31.5476190476,
"line_max": 78,
"alpha_frac": 0.7139815472,
"autogenerated": false,
"ratio": 3.5225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47364815472,
"avg_score": null,
"num_lines": null
} |
__all__ = ['RigidBody']
from sympy import sympify
from sympy.physics.mechanics.point import Point
from sympy.physics.mechanics.essential import ReferenceFrame, Dyadic
class RigidBody(object):
"""An idealized rigid body.
This is essentially a container which holds the various components which
describe a rigid body: a name, mass, center of mass, reference frame, and
inertia.
All of these need to be supplied on creation, but can be changed
afterwards.
Attributes
==========
name : string
The body's name.
masscenter : Point
The point which represents the center of mass of the rigid body.
frame : ReferenceFrame
The ReferenceFrame which the rigid body is fixed in.
mass : Sympifyable
The body's mass.
inertia : (Dyadic, Point)
The body's inertia about a point; stored in a tuple as shown above.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.mechanics import ReferenceFrame, Point, RigidBody
>>> from sympy.physics.mechanics import outer
>>> m = Symbol('m')
>>> A = ReferenceFrame('A')
>>> P = Point('P')
>>> I = outer (A.x, A.x)
>>> inertia_tuple = (I, P)
>>> B = RigidBody('B', P, A, m, inertia_tuple)
>>> # Or you could change them afterwards
>>> m2 = Symbol('m2')
>>> B.mass = m2
"""
def __init__(self, name, masscenter, frame, mass, inertia):
if not isinstance(name, str):
raise TypeError('Supply a valid name.')
self._name = name
self.set_masscenter(masscenter)
self.set_mass(mass)
self.set_frame(frame)
self.set_inertia(inertia)
self._pe = sympify(0)
    def __str__(self):
        # A body is identified by the name given at construction.
        return self._name
    __repr__ = __str__
def get_frame(self):
return self._frame
def set_frame(self, F):
if not isinstance(F, ReferenceFrame):
raise TypeError("RigdBody frame must be a ReferenceFrame object.")
self._frame = F
frame = property(get_frame, set_frame)
def get_masscenter(self):
return self._masscenter
def set_masscenter(self, p):
if not isinstance(p, Point):
raise TypeError("RigidBody center of mass must be a Point object.")
self._masscenter = p
masscenter = property(get_masscenter, set_masscenter)
    def get_mass(self):
        # Mass stored as a SymPy expression (see set_mass).
        return self._mass
    def set_mass(self, m):
        # sympify so both numeric and symbolic masses are accepted.
        self._mass = sympify(m)
    mass = property(get_mass, set_mass)
def get_inertia(self):
return (self._inertia, self._inertia_point)
def set_inertia(self, I):
if not isinstance(I[0], Dyadic):
raise TypeError("RigidBody inertia must be a Dyadic object.")
if not isinstance(I[1], Point):
raise TypeError("RigidBody inertia must be about a Point.")
self._inertia = I[0]
self._inertia_point = I[1]
inertia = property(get_inertia, set_inertia)
def linear_momentum(self, frame):
    """Linear momentum of the rigid body.

    The linear momentum L of a rigid body B with respect to frame N is

    L = M * v*

    where M is the body's mass and v* is the velocity of the body's mass
    center in N.

    Parameters
    ==========
    frame : ReferenceFrame
        The frame in which linear momentum is desired.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
    >>> from sympy.physics.mechanics import RigidBody, dynamicsymbols
    >>> M, v = dynamicsymbols('M v')
    >>> N = ReferenceFrame('N')
    >>> P = Point('P')
    >>> P.set_vel(N, v * N.x)
    >>> I = outer(N.x, N.x)
    >>> Inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, N, M, Inertia_tuple)
    >>> B.linear_momentum(N)
    M*v*N.x
    """
    masscenter_velocity = self.masscenter.vel(frame)
    return self.mass * masscenter_velocity
def angular_momentum(self, point, frame):
    """ Angular momentum of the rigid body.

    The angular momentum H, about some point O, of a rigid body B, in a
    frame N is given by

    H = I* . omega + r* x (M * v)

    where I* is the central inertia dyadic of B, omega is the angular
    velocity of body B in the frame, N, r* is the position vector from
    point O to the mass center of B, and v is the velocity of point O in
    the frame, N.

    Parameters
    ==========
    point : Point
        The point about which angular momentum is desired.
    frame : ReferenceFrame
        The frame in which angular momentum is desired.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
    >>> from sympy.physics.mechanics import RigidBody, dynamicsymbols
    >>> M, v, r, omega = dynamicsymbols('M v r omega')
    >>> N = ReferenceFrame('N')
    >>> b = ReferenceFrame('b')
    >>> b.set_ang_vel(N, omega * b.x)
    >>> P = Point('P')
    >>> P.set_vel(N, 1 * N.x)
    >>> I = outer (b.x, b.x)
    >>> Inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, b, M, Inertia_tuple)
    >>> B.angular_momentum(P, N)
    omega*b.x
    """
    # NOTE(review): the cross-product term is written as
    # (v_O ^ -r) * M rather than the docstring's r* x (M v); verify the
    # sign/operand convention against sympy's Vector cross-product docs.
    return ((self.inertia[0] & self.frame.ang_vel_in(frame)) +
            (point.vel(frame) ^ -self.masscenter.pos_from(point)) *
            self.mass)
def kinetic_energy(self, frame):
    """Kinetic energy of the rigid body

    The kinetic energy, T, of a rigid body, B, is given by

    'T = 1/2 (I omega^2 + m v^2)'

    where I and m are the central inertia dyadic and mass of rigid body B,
    respectively, omega is the body's angular velocity and v is the
    velocity of the body's mass center in the supplied ReferenceFrame.

    Parameters
    ==========
    frame : ReferenceFrame
        The RigidBody's angular velocity and the velocity of its mass
        center is typically defined with respect to an inertial frame but
        any relevant frame in which the velocity is known can be supplied.

    Examples
    ========
    >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
    >>> from sympy.physics.mechanics import RigidBody
    >>> from sympy import symbols
    >>> M, v, r, omega = symbols('M v r omega')
    >>> N = ReferenceFrame('N')
    >>> b = ReferenceFrame('b')
    >>> b.set_ang_vel(N, omega * b.x)
    >>> P = Point('P')
    >>> P.set_vel(N, v * N.x)
    >>> I = outer (b.x, b.x)
    >>> inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, b, M, inertia_tuple)
    >>> B.kinetic_energy(N)
    M*v**2/2 + omega**2/2
    """
    # 1/2 * omega . (I . omega): `/` binds tighter than `&`, so the
    # division applies to the inner dyadic product before the outer dot.
    rotational_KE = (self.frame.ang_vel_in(frame) & (self.inertia[0] &
                     self.frame.ang_vel_in(frame)) / sympify(2))
    # 1/2 * m * (v . v)
    translational_KE = (self.mass * (self.masscenter.vel(frame) &
                        self.masscenter.vel(frame)) / sympify(2))
    return rotational_KE + translational_KE
def set_potential_energy(self, scalar):
    """Used to set the potential energy of this RigidBody.

    Parameters
    ==========
    scalar: Sympifyable
        The potential energy (a scalar) of the RigidBody.

    Examples
    ========
    >>> from sympy.physics.mechanics import Particle, Point, outer
    >>> from sympy.physics.mechanics import RigidBody, ReferenceFrame
    >>> from sympy import symbols
    >>> b = ReferenceFrame('b')
    >>> M, g, h = symbols('M g h')
    >>> P = Point('P')
    >>> I = outer (b.x, b.x)
    >>> Inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, b, M, Inertia_tuple)
    >>> B.set_potential_energy(M * g * h)
    """
    # sympify so plain numbers and strings become SymPy expressions.
    self._pe = sympify(scalar)
@property
def potential_energy(self):
    """The potential energy of the RigidBody, as set by set_potential_energy.

    Examples
    ========
    >>> from sympy.physics.mechanics import RigidBody, Point, outer, ReferenceFrame
    >>> from sympy import symbols
    >>> M, g, h = symbols('M g h')
    >>> b = ReferenceFrame('b')
    >>> P = Point('P')
    >>> I = outer (b.x, b.x)
    >>> Inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, b, M, Inertia_tuple)
    >>> B.set_potential_energy(M * g * h)
    >>> B.potential_energy
    M*g*h
    """
    return self._pe
| {
"repo_name": "flacjacket/sympy",
"path": "sympy/physics/mechanics/rigidbody.py",
"copies": "2",
"size": "8518",
"license": "bsd-3-clause",
"hash": -4834051523300184000,
"line_mean": 29.4214285714,
"line_max": 87,
"alpha_frac": 0.5606949988,
"autogenerated": false,
"ratio": 3.7018687527162104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.526256375151621,
"avg_score": null,
"num_lines": null
} |
__all__ = ['RigidBody']
from sympy import sympify
from sympy.physics.mechanics.point import Point
from sympy.physics.mechanics.essential import ReferenceFrame, Dyadic
class RigidBody(object):
    """An idealized rigid body.

    This is essentially a container which holds the various components which
    describe a rigid body: a name, mass, center of mass, reference frame, and
    inertia.

    All of these need to be supplied on creation, but can be changed
    afterwards.

    Attributes
    ==========
    name : string
        The body's name.
    masscenter : Point
        The point which represents the center of mass of the rigid body.
    frame : ReferenceFrame
        The ReferenceFrame which the rigid body is fixed in.
    mass : Sympifyable
        The body's mass.
    inertia : (Dyadic, Point)
        The body's inertia about a point; stored in a tuple as shown above.

    Examples
    ========
    >>> from sympy import Symbol
    >>> from sympy.physics.mechanics import ReferenceFrame, Point, RigidBody
    >>> from sympy.physics.mechanics import outer
    >>> m = Symbol('m')
    >>> A = ReferenceFrame('A')
    >>> P = Point('P')
    >>> I = outer(A.x, A.x)
    >>> inertia_tuple = (I, P)
    >>> B = RigidBody('B', P, A, m, inertia_tuple)
    >>> # Or you could change them afterwards
    >>> m2 = Symbol('m2')
    >>> B.mass = m2
    """

    def __init__(self, name, masscenter, frame, mass, inertia):
        """Validate and store all components via the set_* helpers."""
        if not isinstance(name, str):
            raise TypeError('Supply a valid name.')
        self._name = name
        self.set_masscenter(masscenter)
        self.set_mass(mass)
        self.set_frame(frame)
        self.set_inertia(inertia)
        # Potential energy defaults to zero until set_potential_energy is called.
        self._pe = sympify(0)

    def __str__(self):
        return self._name

    __repr__ = __str__

    def get_frame(self):
        """Return the ReferenceFrame the body is fixed in."""
        return self._frame

    def set_frame(self, F):
        if not isinstance(F, ReferenceFrame):
            # Fixed typo in the error message ("RigdBody" -> "RigidBody").
            raise TypeError("RigidBody frame must be a ReferenceFrame object.")
        self._frame = F

    frame = property(get_frame, set_frame)

    def get_masscenter(self):
        """Return the Point representing the body's center of mass."""
        return self._masscenter

    def set_masscenter(self, p):
        if not isinstance(p, Point):
            raise TypeError("RigidBody center of mass must be a Point object.")
        self._masscenter = p

    masscenter = property(get_masscenter, set_masscenter)

    def get_mass(self):
        """Return the body's mass."""
        return self._mass

    def set_mass(self, m):
        # Any Sympifyable value is accepted; sympify normalizes it.
        self._mass = sympify(m)

    mass = property(get_mass, set_mass)

    def get_inertia(self):
        """Return the body's inertia as a (Dyadic, Point) tuple."""
        return (self._inertia, self._inertia_point)

    def set_inertia(self, I):
        if not isinstance(I[0], Dyadic):
            raise TypeError("RigidBody inertia must be a Dyadic object.")
        if not isinstance(I[1], Point):
            raise TypeError("RigidBody inertia must be about a Point.")
        self._inertia = I[0]
        self._inertia_point = I[1]

    inertia = property(get_inertia, set_inertia)

    def linear_momentum(self, frame):
        """ Linear momentum of the rigid body.

        The linear momentum L, of a rigid body B, with respect to frame N is
        given by

        L = M * v*

        where M is the mass of the rigid body and v* is the velocity of
        the mass center of B in the frame, N.

        Parameters
        ==========
        frame : ReferenceFrame
            The frame in which linear momentum is desired.

        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
        >>> from sympy.physics.mechanics import RigidBody, dynamicsymbols
        >>> M, v = dynamicsymbols('M v')
        >>> N = ReferenceFrame('N')
        >>> P = Point('P')
        >>> P.set_vel(N, v * N.x)
        >>> I = outer(N.x, N.x)
        >>> Inertia_tuple = (I, P)
        >>> B = RigidBody('B', P, N, M, Inertia_tuple)
        >>> B.linear_momentum(N)
        M*v*N.x
        """
        return self.mass * self.masscenter.vel(frame)

    def angular_momentum(self, point, frame):
        """ Angular momentum of the rigid body.

        The angular momentum H, about some point O, of a rigid body B, in a
        frame N is given by

        H = I* . omega + r* x (M * v)

        where I* is the central inertia dyadic of B, omega is the angular
        velocity of body B in the frame, N, r* is the position vector from
        point O to the mass center of B, and v is the velocity of point O in
        the frame, N.

        Parameters
        ==========
        point : Point
            The point about which angular momentum is desired.
        frame : ReferenceFrame
            The frame in which angular momentum is desired.

        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
        >>> from sympy.physics.mechanics import RigidBody, dynamicsymbols
        >>> M, v, r, omega = dynamicsymbols('M v r omega')
        >>> N = ReferenceFrame('N')
        >>> b = ReferenceFrame('b')
        >>> b.set_ang_vel(N, omega * b.x)
        >>> P = Point('P')
        >>> P.set_vel(N, 1 * N.x)
        >>> I = outer(b.x, b.x)
        >>> Inertia_tuple = (I, P)
        >>> B = RigidBody('B', P, b, M, Inertia_tuple)
        >>> B.angular_momentum(P, N)
        omega*b.x
        """
        return ((self.inertia[0] & self.frame.ang_vel_in(frame)) +
                (point.vel(frame) ^ -self.masscenter.pos_from(point)) *
                self.mass)

    def kinetic_energy(self, frame):
        """Kinetic energy of the rigid body

        The kinetic energy, T, of a rigid body, B, is given by

        'T = 1/2 (I omega^2 + m v^2)'

        where I and m are the central inertia dyadic and mass of rigid body B,
        respectively, omega is the body's angular velocity and v is the
        velocity of the body's mass center in the supplied ReferenceFrame.

        Parameters
        ==========
        frame : ReferenceFrame
            The RigidBody's angular velocity and the velocity of its mass
            center is typically defined with respect to an inertial frame but
            any relevant frame in which the velocity is known can be supplied.

        Examples
        ========
        >>> from sympy.physics.mechanics import Point, ReferenceFrame, outer
        >>> from sympy.physics.mechanics import RigidBody
        >>> from sympy import symbols
        >>> M, v, r, omega = symbols('M v r omega')
        >>> N = ReferenceFrame('N')
        >>> b = ReferenceFrame('b')
        >>> b.set_ang_vel(N, omega * b.x)
        >>> P = Point('P')
        >>> P.set_vel(N, v * N.x)
        >>> I = outer(b.x, b.x)
        >>> inertia_tuple = (I, P)
        >>> B = RigidBody('B', P, b, M, inertia_tuple)
        >>> B.kinetic_energy(N)
        M*v**2/2 + omega**2/2
        """
        # 1/2 * omega . (I . omega); `/` binds tighter than `&`.
        rotational_KE = (self.frame.ang_vel_in(frame) & (self.inertia[0] &
                         self.frame.ang_vel_in(frame)) / sympify(2))
        # 1/2 * m * (v . v)
        translational_KE = (self.mass * (self.masscenter.vel(frame) &
                            self.masscenter.vel(frame)) / sympify(2))
        return rotational_KE + translational_KE

    def set_potential_energy(self, scalar):
        """Used to set the potential energy of this RigidBody.

        Parameters
        ==========
        scalar: Sympifyable
            The potential energy (a scalar) of the RigidBody.

        Examples
        ========
        >>> from sympy.physics.mechanics import Particle, Point, outer
        >>> from sympy.physics.mechanics import RigidBody, ReferenceFrame
        >>> from sympy import symbols
        >>> b = ReferenceFrame('b')
        >>> M, g, h = symbols('M g h')
        >>> P = Point('P')
        >>> I = outer(b.x, b.x)
        >>> Inertia_tuple = (I, P)
        >>> B = RigidBody('B', P, b, M, Inertia_tuple)
        >>> B.set_potential_energy(M * g * h)
        """
        self._pe = sympify(scalar)

    @property
    def potential_energy(self):
        """The potential energy of the RigidBody, as set by set_potential_energy.

        Examples
        ========
        >>> from sympy.physics.mechanics import RigidBody, Point, outer, ReferenceFrame
        >>> from sympy import symbols
        >>> M, g, h = symbols('M g h')
        >>> b = ReferenceFrame('b')
        >>> P = Point('P')
        >>> I = outer(b.x, b.x)
        >>> Inertia_tuple = (I, P)
        >>> B = RigidBody('B', P, b, M, Inertia_tuple)
        >>> B.set_potential_energy(M * g * h)
        >>> B.potential_energy
        M*g*h
        """
        return self._pe
| {
"repo_name": "amitjamadagni/sympy",
"path": "sympy/physics/mechanics/rigidbody.py",
"copies": "2",
"size": "8519",
"license": "bsd-3-clause",
"hash": -3983425850745994000,
"line_mean": 29.3167259786,
"line_max": 87,
"alpha_frac": 0.5606291818,
"autogenerated": false,
"ratio": 3.702303346371143,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5262932528171143,
"avg_score": null,
"num_lines": null
} |
__all__ = ['rig_lib', 'node_registry', 'settings', 'vendor', 'exception']

# NOTE: implicit relative imports -- this package targets Python 2.
import exception
import settings
import node_registry
import vendor.cgLogging as logger

LOG = logger.cgLogging.getLogger(__name__, level=logger.cgLogging.INFO, shell=False)

# Module-wide registry singleton; register_node is the public hook for
# registering node types, shapes the control-shape factory class.
registry = node_registry.Registry()
register_node = registry.register_node
shapes = node_registry.MayaControlShapeFactory

import rig_lib
import rig_lib.core.core_utils.base_utils
import rig_lib.core.core_utils.maya_utils

# Registering all necessary functions/nodes with registry.
LOG.info('starting set_default_utils')
registry.set_default_utils(mode=settings.MODE)
LOG.info('starting control shapes registration')
# Importing these modules registers their contents as a side effect.
import rig_lib.core.core_utils.maya_utils.control_shapes
LOG.info('starting nodes registration')
import rig_lib.core.nodes
LOG.info('starting set_default_nodes')
registry.set_default_nodes(mode=settings.MODE)
import rig_lib.elements
def log_function_call(func):
    """Decorator that logs each call to *func* at INFO level before running it.

    The wrapper is renamed ``<name>_logoutput`` so logged output is
    distinguishable from the undecorated function.
    """
    def wrapped(*args, **kwargs):
        # Generalized from a single ``instance`` positional argument so the
        # decorator works for any signature; existing callers (which pass
        # exactly one positional argument) are unaffected.
        LOG.info('Running test %s' % func.__name__)
        return func(*args, **kwargs)
    wrapped.__name__ = func.__name__ + '_logoutput'
    return wrapped
"repo_name": "AndresMWeber/komodo",
"path": "rigger/__init__.py",
"copies": "1",
"size": "1109",
"license": "mit",
"hash": 8975450444167380000,
"line_mean": 29.8333333333,
"line_max": 84,
"alpha_frac": 0.7565374211,
"autogenerated": false,
"ratio": 3.4228395061728394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9635799038505944,
"avg_score": 0.008715577753378898,
"num_lines": 36
} |
# ctypes/pythoncom helpers for creating COM objects, including licenced
# creation (IClassFactory2) and registration-free creation straight from a DLL.
__all__ = (
    ####### Class Objects
    #CoGetClassObject - Normal, not wrapped
    'CoDllGetClassObject', #Get ClassObject from a DLL file
    ####### ClassFactory::CreateInstance Wrappers
    'CoCreateInstanceFromFactory', #Create an object via IClassFactory::CreateInstance
    'CoCreateInstanceFromFactoryLicenced', #Create a licenced object via IClassFactory2::CreateInstanceLic
    ###### Util
    'CoReleaseObject', #Calls Release() on a COM object
    ###### Main Utility Methods
    #'CoCreateInstance', #Not wrapped, normal call
    'CoCreateInstanceLicenced', #CoCreateInstance, but with a licence key
    ###### Hacky DLL methods for reg-free COM without Activation Contexts, manifests, etc
    'CoCreateInstanceFromDll', #Given a dll, a clsid, and an iid, create an object
    'CoCreateInstanceFromDllLicenced', #Given a dll, a clsid, an iid, and a license key, create an object
)

# IID of IClassFactory2, the licenced class-factory interface.
IID_IClassFactory2 = "{B196B28F-BAB4-101A-B69C-00AA00341D07}"

from uuid import UUID
from ctypes import OleDLL, WinDLL, c_ulong, byref, WINFUNCTYPE, POINTER, c_char_p, c_void_p
from ctypes.wintypes import HRESULT
import pythoncom
import win32com.client

import logging
log = logging.getLogger(__name__)
def _raw_guid(guid):
"""Given a string GUID, or a pythoncom IID, return the GUID laid out in memory suitable for passing to ctypes"""
return UUID(str(guid)).bytes_le
# ctypes prototype for IClassFactory2::CreateInstanceLic:
# HRESULT CreateInstanceLic(pUnkOuter, pUnkReserved, riid, bstrKey, ppvObj)
proto_icf2_base = WINFUNCTYPE(HRESULT,
    c_ulong,
    c_ulong,
    c_char_p,
    c_ulong,
    POINTER(c_ulong),
    )

# Bind the prototype to vtable slot 7 of IClassFactory2.
# Paramflag 1 = input, 2 = output, 1|4 = input defaulting to zero, so only
# pUnkOuter, riid and bstrKey need to be supplied and ppvObj is returned.
IClassFactory2__CreateInstanceLic = proto_icf2_base(7, 'CreateInstanceLic', (
    (1, 'pUnkOuter'),
    (1 | 4, 'pUnkReserved'),
    (1, 'riid'),
    (1, 'bstrKey'),
    (2, 'ppvObj'),
    ), _raw_guid(IID_IClassFactory2))
#--------------------------------
#--------------------------------
def _pc_wrap(iptr, resultCLSID=None):
    """Wrap a raw PyIDispatch pointer in a win32com Dispatch proxy."""
    log.debug("_pc_wrap: %s, %s"%(iptr, resultCLSID))
    dispatch = win32com.client.Dispatch(iptr, resultCLSID=resultCLSID)
    log.debug("_pc_wrap: %s (%s)", dispatch.__class__.__name__, dispatch)
    return dispatch
def CoCreateInstanceFromFactory(factory_ptr, iid_interface=pythoncom.IID_IDispatch, pUnkOuter=None):
    """Given a factory_ptr whose interface is IClassFactory, create the instance of clsid_class with the specified interface"""
    class_factory = pythoncom.ObjectFromAddress(factory_ptr.value, pythoncom.IID_IClassFactory)
    return class_factory.CreateInstance(pUnkOuter, iid_interface)
def CoCreateInstanceFromFactoryLicenced(factory_ptr, key, iid_interface=pythoncom.IID_IDispatch, pUnkOuter=None):
    """Given a factory_ptr whose interface is IClassFactory2, create the instance of clsid_class with the specified interface.

    ``key`` is the licence key passed to IClassFactory2::CreateInstanceLic.
    """
    requested_iid = _raw_guid(iid_interface)
    ole_aut = WinDLL("OleAut32.dll")
    # BUGFIX: SysAllocString returns a BSTR (a pointer); ctypes defaults the
    # restype to a C int, which truncates the pointer on 64-bit Windows.
    ole_aut.SysAllocString.restype = c_void_p
    ole_aut.SysFreeString.argtypes = [c_void_p]
    key_bstr = ole_aut.SysAllocString(unicode(key))
    try:
        obj = IClassFactory2__CreateInstanceLic(factory_ptr, pUnkOuter or 0, c_char_p(requested_iid), key_bstr)
        disp_obj = pythoncom.ObjectFromAddress(obj, iid_interface)
        return disp_obj
    finally:
        # Free the BSTR even if creation failed.
        if key_bstr:
            ole_aut.SysFreeString(key_bstr)
#----------------------------------
def CoReleaseObject(obj_ptr):
    """Calls Release() on a COM object. obj_ptr should be a c_void_p"""
    if obj_ptr:
        # IUnknown::Release lives at vtable slot 2.
        release = WINFUNCTYPE(HRESULT)(2, 'Release', (), pythoncom.IID_IUnknown)
        release(obj_ptr)
#-----------------------------------
def CoCreateInstanceLicenced(clsid_class, key, pythoncom_iid_interface=pythoncom.IID_IDispatch, dwClsContext=pythoncom.CLSCTX_SERVER, pythoncom_wrapdisp=True, wrapas=None):
    """Uses IClassFactory2::CreateInstanceLic to create a COM object given a licence key.

    Returns a win32com Dispatch wrapper unless ``pythoncom_wrapdisp`` is
    False, in which case the raw interface pointer is returned.
    """
    # Uses the module-level IID_IClassFactory2 constant (a duplicate local
    # definition previously shadowed it).
    ole = OleDLL("Ole32.dll")
    clsid_class_raw = _raw_guid(clsid_class)
    iclassfactory2 = _raw_guid(IID_IClassFactory2)
    com_classfactory = c_void_p(0)
    ole.CoGetClassObject(clsid_class_raw, dwClsContext, None, iclassfactory2, byref(com_classfactory))
    try:
        iptr = CoCreateInstanceFromFactoryLicenced(
            factory_ptr=com_classfactory,
            key=key,
            iid_interface=pythoncom_iid_interface,
            pUnkOuter=None,
        )
        if pythoncom_wrapdisp:
            return _pc_wrap(iptr, resultCLSID=wrapas or clsid_class)
        return iptr
    finally:
        # Always drop our reference on the class factory.
        if com_classfactory:
            CoReleaseObject(com_classfactory)
#-----------------------------------------------------------
#DLLs
def CoDllGetClassObject(dll_filename, clsid_class, iid_factory=pythoncom.IID_IClassFactory):
    """Given a DLL filename and a desired class, return the factory for that class (as a c_void_p)"""
    dll = OleDLL(dll_filename)
    raw_clsid = _raw_guid(clsid_class)
    raw_factory_iid = _raw_guid(iid_factory)
    factory = c_void_p(0)
    dll.DllGetClassObject(raw_clsid, raw_factory_iid, byref(factory))
    return factory
def CoCreateInstanceFromDll(dll, clsid_class, iid_interface=pythoncom.IID_IDispatch, pythoncom_wrapdisp=True, wrapas=None):
    """Create an instance of clsid_class straight from *dll* (reg-free COM)."""
    factory = CoDllGetClassObject(dll, clsid_class)
    try:
        interface_ptr = CoCreateInstanceFromFactory(factory, iid_interface)
        if not pythoncom_wrapdisp:
            return interface_ptr
        return _pc_wrap(interface_ptr, resultCLSID=wrapas or clsid_class)
    finally:
        CoReleaseObject(factory)
def CoCreateInstanceFromDllLicenced(dll, clsid_class, key, iid_interface=pythoncom.IID_IDispatch, pythoncom_wrapdisp=True, wrapas=None):
    """Create a licenced instance of clsid_class straight from *dll* (reg-free COM)."""
    factory2 = CoDllGetClassObject(dll, clsid_class, iid_factory=IID_IClassFactory2)
    try:
        interface_ptr = CoCreateInstanceFromFactoryLicenced(factory2, key, iid_interface)
        if not pythoncom_wrapdisp:
            return interface_ptr
        return _pc_wrap(interface_ptr, resultCLSID=wrapas or clsid_class)
    finally:
        CoReleaseObject(factory2)
"repo_name": "xbcsmith/frell",
"path": "test/win32_get_object.py",
"copies": "1",
"size": "6150",
"license": "apache-2.0",
"hash": 3178155196268830700,
"line_mean": 38.7417218543,
"line_max": 172,
"alpha_frac": 0.6655284553,
"autogenerated": false,
"ratio": 3.324324324324324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9434680177285882,
"avg_score": 0.011034520467688371,
"num_lines": 151
} |
# Public API of this module.
__all__ = (
    'Column', 'TextColumn', 'NumberColumn',
)
class Column(object):
    """Represents a single column of a table.

    ``verbose_name`` defines a display name for this column used for output.

    ``name`` is the internal name of the column. Normally you don't need to
    specify this, as the attribute that you make the column available under
    is used. However, in certain circumstances it can be useful to override
    this default, e.g. when using ModelTables if you want a column to not
    use the model field name.

    ``default`` is the default value for this column. If the data source
    does provide ``None`` for a row, the default will be used instead. Note
    that whether this effects ordering might depend on the table type (model
    or normal). Also, you can specify a callable, which will be passed a
    ``BoundRow`` instance and is expected to return the default to be used.

    Additionally, you may specify ``data``. It works very much like
    ``default``, except its effect does not depend on the actual cell
    value. When given a function, it will always be called with a row object,
    expected to return the cell value. If given a string, that name will be
    used to read the data from the source (instead of the column's name).
    Note the interaction with ``default``. If ``default`` is specified as
    well, it will be used whenever ``data`` yields in a None value.

    You can use ``visible`` to flag the column as hidden by default.
    However, this can be overridden by the ``visibility`` argument to the
    table constructor. If you want to make the column completely unavailable
    to the user, set ``inaccessible`` to True.

    Setting ``sortable`` to False will result in this column being unusable
    in ordering. You can further change the *default* sort direction to
    descending using ``direction``. Note that this option changes the actual
    direction only indirectly. Normal and reverse order, the terms
    django-tables exposes, now simply mean different things.
    """

    # Sort-direction constants accepted by ``direction``.
    ASC = 1
    DESC = 2

    # Tracks each time a Column instance is created. Used to retain order.
    creation_counter = 0

    def __init__(self, verbose_name=None, name=None, default=None, data=None,
                 visible=True, inaccessible=False, sortable=None,
                 direction=ASC):
        self.verbose_name = verbose_name
        self.name = name
        self.default = default
        self.data = data
        self.visible = visible
        self.inaccessible = inaccessible
        self.sortable = sortable
        self.direction = direction
        # Remember declaration order so tables can list columns as declared.
        self.creation_counter = Column.creation_counter
        Column.creation_counter += 1

    def _set_direction(self, value):
        # Accepts the strings 'asc'/'desc' (any other string raises
        # ValueError) or any non-string value, e.g. the ASC/DESC constants.
        if isinstance(value, basestring):  # Python 2: covers str and unicode
            if value in ('asc', 'desc'):
                self._direction = (value == 'asc') and Column.ASC or Column.DESC
            else:
                raise ValueError('Invalid direction value: %s' % value)
        else:
            self._direction = value

    direction = property(lambda s: s._direction, _set_direction)
class TextColumn(Column):
    """A column holding text values; no behavior beyond the Column base."""
    pass
class NumberColumn(Column):
    """A column holding numeric values; no behavior beyond the Column base."""
    pass
| {
"repo_name": "jqb/django-tables",
"path": "django_tables/columns.py",
"copies": "1",
"size": "3276",
"license": "bsd-2-clause",
"hash": 1749462832732015900,
"line_mean": 39.4683544304,
"line_max": 80,
"alpha_frac": 0.6572039072,
"autogenerated": false,
"ratio": 4.556328233657858,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016100402339052078,
"num_lines": 79
} |
# Tree builder that delegates HTML parsing to html5lib.
__all__ = [
    'HTML5TreeBuilder',
    ]

import warnings
from bs4.builder import (
    PERMISSIVE,
    HTML,
    HTML_5,
    HTMLTreeBuilder,
    )
from bs4.element import NamespacedAttribute

import html5lib
from html5lib.constants import namespaces
from bs4.element import (
    Comment,
    Doctype,
    NavigableString,
    Tag,
    )
class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree."""

    features = ['html5lib', PERMISSIVE, HTML_5, HTML]

    def prepare_markup(self, markup, user_specified_encoding):
        # Store the user-specified encoding for use later on.
        self.user_specified_encoding = user_specified_encoding
        # Single candidate; presumably (markup, try_encoding, declared
        # encoding, is_retry) -- confirm against TreeBuilder.prepare_markup.
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

    def create_treebuilder(self, namespaceHTMLElements):
        # Called back by html5lib's HTMLParser with its namespace setting.
        self.underlying_builder = TreeBuilderForHtml5lib(
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
    """html5lib TreeBuilder that constructs a Beautiful Soup tree."""

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

    def documentClass(self):
        self.soup.reset()
        return Element(self.soup, self.soup, None)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        doctype = Doctype.for_name_and_ids(name, publicId, systemId)
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        # BUGFIX: BeautifulSoup was referenced here without ever being
        # imported, raising NameError whenever a fragment was parsed.
        # Import locally to avoid a circular module-level import.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

    def appendChild(self, node):
        # XXX This code is not covered by the BS4 tests.
        self.soup.append(node.element)

    def getDocument(self):
        return self.soup

    def getFragment(self):
        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
    """Mapping-style view of an element's attributes, as html5lib expects.

    Reads are served from a snapshot taken at construction time; writes go
    through to the underlying element.
    """
    def __init__(self, element):
        self.element = element
        self.attrs = dict(self.element.attrs)

    def __iter__(self):
        return list(self.attrs.items()).__iter__()

    def __setitem__(self, name, value):
        # BUGFIX: removed a leftover no-op debug expression
        # (`"set attr", name, value`) that evaluated a tuple and discarded it.
        self.element[name] = value

    def items(self):
        return list(self.attrs.items())

    def keys(self):
        return list(self.attrs.keys())

    def __len__(self):
        return len(self.attrs)

    def __getitem__(self, name):
        return self.attrs[name]

    def __contains__(self, name):
        return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
    """html5lib Node backed by a Beautiful Soup Tag."""

    def __init__(self, element, soup, namespace):
        html5lib.treebuilders._base.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace

    def appendChild(self, node):
        string_child = child = None
        if isinstance(node, str):
            # Some other piece of code decided to pass in a string
            # instead of creating a TextElement object to contain the
            # string.
            string_child = child = node
        elif isinstance(node, Tag):
            # Some other piece of code decided to pass in a Tag
            # instead of creating an Element object to contain the
            # Tag.
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
        else:
            child = node.element

        if not isinstance(child, str) and child.parent is not None:
            node.element.extract()

        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
            # "a</a>a</a>a</a>..."
            old_element = self.element.contents[-1]
            new_element = self.soup.new_string(old_element + string_child)
            old_element.replace_with(new_element)
            self.soup._most_recent_element = new_element
        else:
            if isinstance(node, str):
                # Create a brand new NavigableString from this string.
                child = self.soup.new_string(node)

            # Tell Beautiful Soup to act as if it parsed this element
            # immediately after the parent's last descendant. (Or
            # immediately after the parent, if it has no children.)
            if self.element.contents:
                most_recent_element = self.element._last_descendant(False)
            else:
                most_recent_element = self.element
            self.soup.object_was_parsed(
                child, parent=self.element,
                most_recent_element=most_recent_element)

    def getAttributes(self):
        return AttrList(self.element)

    def setAttributes(self, attributes):
        if attributes is not None and len(attributes) > 0:
            # Convert html5lib's tuple keys (namespaced attributes) into
            # NamespacedAttribute keys. (Removed an unused local,
            # `converted_attributes = []`.)
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
                    new_name = NamespacedAttribute(*name)
                    del attributes[name]
                    attributes[new_name] = value

            self.soup.builder._replace_cdata_list_attribute_values(
                self.name, attributes)
            for name, value in list(attributes.items()):
                self.element[name] = value

            # The attributes may contain variables that need substitution.
            # Call set_up_substitutions manually.
            #
            # The Tag constructor called this method when the Tag was created,
            # but we just set/changed the attributes, so call it again.
            self.soup.builder.set_up_substitutions(self.element)

    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        if insertBefore:
            text = TextNode(self.soup.new_string(data), self.soup)
            # BUGFIX: the TextNode was built but the raw string was passed
            # to insertBefore, which dereferences node.element and would
            # crash on a str; pass the TextNode instead.
            self.insertBefore(text, insertBefore)
        else:
            # appendChild handles raw strings itself.
            self.appendChild(data)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
        # NOTE(review): when index == 0, contents[index-1] wraps to the last
        # child; it appears html5lib never inserts a string before the first
        # child of a text-bearing node -- confirm before relying on this.
        if (node.element.__class__ == NavigableString and self.element.contents
            and self.element.contents[index-1].__class__ == NavigableString):
            # (See comments in appendChild)
            old_node = self.element.contents[index-1]
            new_str = self.soup.new_string(old_node + node.element)
            old_node.replace_with(new_str)
        else:
            self.element.insert(index, node.element)
            node.parent = self

    def removeChild(self, node):
        node.element.extract()

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
        # are removed.
        final_next_element = element.next_sibling

        new_parents_last_descendant = new_parent_element._last_descendant(False, False)
        if len(new_parent_element.contents) > 0:
            # The new parent already contains children. We will be
            # appending this tag's children to the end.
            new_parents_last_child = new_parent_element.contents[-1]
            new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
        else:
            # The new parent contains no children.
            new_parents_last_child = None
            new_parents_last_descendant_next_element = new_parent_element.next_element

        # (Removed an unused local, `append_after = new_parent.element.contents`.)
        to_append = element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            first_child.previous_element = new_parents_last_descendant
            first_child.previous_sibling = new_parents_last_child

            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
            new_parent_element.contents.append(child)

        # Now that this element has no children, change its .next_element.
        element.contents = []
        element.next_element = final_next_element

    def cloneNode(self):
        tag = self.soup.new_tag(self.element.name, self.namespace)
        node = Element(tag, self.soup, self.namespace)
        for key, value in self.attributes:
            node.attributes[key] = value
        return node

    def hasContent(self):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace == None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class TextNode(Element):
    """Leaf node wrapping a NavigableString; constructed with no tag name."""
    def __init__(self, element, soup):
        html5lib.treebuilders._base.Node.__init__(self, None)
        self.element = element
        self.soup = soup

    def cloneNode(self):
        # Text nodes are never cloned by this builder.
        raise NotImplementedError
| {
"repo_name": "hentaiPanda/bgm_tenkou",
"path": "bs4/builder/_html5lib.py",
"copies": "1",
"size": "10912",
"license": "mit",
"hash": 1574100160440551000,
"line_mean": 36.2877192982,
"line_max": 159,
"alpha_frac": 0.6052052786,
"autogenerated": false,
"ratio": 4.33704292527822,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017119482951920265,
"num_lines": 285
} |
# Public exception hierarchy exported by this module.
__all__ = [
    'ImproperlyConfigured', 'ElasticsearchException', 'SerializationError',
    'TransportError', 'NotFoundError', 'ConflictError', 'RequestError', 'ConnectionError',
    'SSLError', 'ConnectionTimeout'
]
class ImproperlyConfigured(Exception):
    """Raised when the configuration handed to the client is invalid or
    internally inconsistent."""
class ElasticsearchException(Exception):
    """Root of the exception hierarchy for this package's operations
    (:class:`~elasticsearch.ImproperlyConfigured` deliberately sits
    outside it)."""
class SerializationError(ElasticsearchException):
    """Raised when the configured ``Serializer`` cannot serialize the
    data passed in."""
class TransportError(ElasticsearchException):
    """Raised for non-OK (>= 400) HTTP responses from ES, and for actual
    connection failures, in which case ``status_code`` is ``'N/A'``.

    Constructed as ``TransportError(status_code, error, info)``; the
    three properties below simply unpack ``self.args``.
    """

    @property
    def status_code(self):
        """HTTP status code of the offending response, or ``'N/A'``."""
        return self.args[0]

    @property
    def error(self):
        """Short string error message."""
        return self.args[1]

    @property
    def info(self):
        """Dict of error details returned by ES, where available."""
        return self.args[2]

    def __str__(self):
        root_cause = ''
        try:
            if self.info:
                root_cause = ', %r' % self.info['error']['root_cause'][0]['reason']
        except LookupError:
            # Response body did not have the expected shape; omit the cause.
            pass
        return 'TransportError(%s, %r%s)' % (self.status_code, self.error,
                                             root_cause)
class ConnectionError(TransportError):
    """Raised when an exception occurred while talking to ES.  The
    original exception from the underlying
    :class:`~elasticsearch.Connection` implementation is available as
    ``.info``."""

    def __str__(self):
        wrapped = self.info
        return 'ConnectionError(%s) caused by: %s(%s)' % (
            self.error, wrapped.__class__.__name__, wrapped)
class SSLError(ConnectionError):
    """Raised when an SSL error is encountered."""
class ConnectionTimeout(ConnectionError):
    """A network timeout; does not trigger a node retry by default."""

    def __str__(self):
        wrapped = self.info
        return 'ConnectionTimeout caused by - %s(%s)' % (
            wrapped.__class__.__name__, wrapped)
class NotFoundError(TransportError):
    """Raised for a 404 response."""
class ConflictError(TransportError):
    """Raised for a 409 response."""
class RequestError(TransportError):
    """Raised for a 400 response."""
class AuthenticationException(TransportError):
    """Raised for a 401 response."""
class AuthorizationException(TransportError):
    """Raised for a 403 response."""
# more generic mappings from status_code to python exceptions
# Maps well-known HTTP status codes to the specific TransportError
# subclass raised for them; any status not listed here falls back to
# plain TransportError.
HTTP_EXCEPTIONS = {
    400: RequestError,
    401: AuthenticationException,
    403: AuthorizationException,
    404: NotFoundError,
    409: ConflictError,
}
| {
"repo_name": "pogaku9/aws-datalake-quickstart-looker-isv-integration",
"path": "scripts/lambdas/writetoES/elasticsearch/exceptions.py",
"copies": "1",
"size": "3236",
"license": "apache-2.0",
"hash": 323567427144964100,
"line_mean": 27.6880733945,
"line_max": 90,
"alpha_frac": 0.6276266996,
"autogenerated": false,
"ratio": 4.5835694050991505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5711196104699151,
"avg_score": null,
"num_lines": null
} |
# Public names exported by this builder module.
__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
]
from io import BytesIO
from io import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
# Feature name identifying the lxml-backed builders.
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
    """Event-driven XML tree builder backed by ``lxml.etree``.

    An instance acts as the *target* object of an ``etree.XMLParser``:
    lxml invokes the ``start``/``end``/``data``/``comment``/... callbacks
    below and the builder forwards them to the BeautifulSoup object held
    in ``self.soup``.
    """

    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True

    # Well, it's permissive by XML parser standards.
    features = [LXML, XML, FAST, PERMISSIVE]

    # Markup is fed to lxml in chunks of this many bytes/characters.
    CHUNK_SIZE = 512

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace': "xml"}

    def default_parser(self, encoding):
        """Return the parser to use when none was given to __init__.

        This can either return a parser object or a class, which will be
        instantiated with default arguments by :meth:`parser_for`.
        """
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        """Return a ready-to-use parser object for `encoding`."""
        # Use the default parser.
        parser = self.default_parser(encoding)
        # FIX: was ``isinstance(parser, collections.Callable)``.
        # ``collections.Callable`` moved to ``collections.abc`` in Python
        # 3.3 and the old alias was removed in 3.10; the ``callable()``
        # builtin performs the same __call__ check.
        if callable(parser):
            # A class (or factory) was supplied; instantiate it with
            # default arguments.
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
        self._default_parser = parser
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        # Stack of inverted namespace maps; one entry is pushed per open
        # tag (a dict when the tag introduces namespaces, else None).
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
        if tag[0] == '{':
            return tuple(tag[1:].split('}', 1))
        else:
            return (None, tag)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None):
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for parsing the document.
        """
        if isinstance(markup, str):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, str):
            # No, apparently not. Convert the Unicode to UTF-8 and
            # tell lxml to parse it as UTF-8.
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(markup, try_encodings, is_html)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

    def feed(self, markup):
        """Push `markup` through the lxml parser in CHUNK_SIZE pieces.

        Raises ParserRejectedMarkup when lxml cannot handle the input.
        """
        if isinstance(markup, bytes):
            markup = BytesIO(markup)
        elif isinstance(markup, str):
            markup = StringIO(markup)

        # Call feed() at least once, even if the markup is empty,
        # or the parser won't be initialized.
        data = markup.read(self.CHUNK_SIZE)
        try:
            self.parser = self.parser_for(self.soup.original_encoding)
            self.parser.feed(data)
            while len(data) != 0:
                # Now call feed() on the rest of the data, chunk by chunk.
                data = markup.read(self.CHUNK_SIZE)
                if len(data) != 0:
                    self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(str(e))

    def close(self):
        # Reset the namespace stack for the next document.
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # NOTE: the mutable default for ``nsmap`` is kept for interface
        # stability; it is never mutated here, so it is harmless.
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            # NOTE(review): a tag that *does* declare new namespaces while
            # others are already active also falls into this branch, so its
            # nsmap is ignored -- confirm against upstream bs4 before
            # changing this.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.
            inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
            for prefix, namespace in list(nsmap.items()):
                attribute = NamespacedAttribute(
                    "xmlns", prefix, "http://www.w3.org/2000/xmlns/")
                attrs[attribute] = namespace

        # Namespaces are in play. Find any attributes that came in
        # from lxml with namespaces attached to their names, and
        # turn them into NamespacedAttribute objects.
        new_attrs = {}
        for attr, value in list(attrs.items()):
            namespace, attr = self._getNsTag(attr)
            if namespace is None:
                new_attrs[attr] = value
            else:
                nsprefix = self._prefix_for_namespace(namespace)
                attr = NamespacedAttribute(nsprefix, attr, namespace)
                new_attrs[attr] = value
        attrs = new_attrs

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
            return None
        for inverted_nsmap in reversed(self.nsmaps):
            if inverted_nsmap is not None and namespace in inverted_nsmap:
                return inverted_nsmap[namespace]
        return None

    def end(self, name):
        self.soup.endData()
        completed_tag = self.soup.tagStack[-1]
        namespace, name = self._getNsTag(name)
        nsprefix = None
        if namespace is not None:
            for inverted_nsmap in reversed(self.nsmaps):
                if inverted_nsmap is not None and namespace in inverted_nsmap:
                    nsprefix = inverted_nsmap[namespace]
                    break
        self.soup.handle_endtag(name, nsprefix)
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            self.nsmaps.pop()

    def pi(self, target, data):
        # Processing instructions are ignored.
        pass

    def data(self, content):
        self.soup.handle_data(content)

    def doctype(self, name, pubid, system):
        self.soup.endData()
        doctype = Doctype.for_name_and_ids(name, pubid, system)
        self.soup.object_was_parsed(doctype)

    def comment(self, content):
        "Handle comments as Comment objects."
        self.soup.endData()
        self.soup.handle_data(content)
        self.soup.endData(Comment)

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
    """HTML flavour of the lxml tree builder."""

    features = [LXML, HTML, FAST, PERMISSIVE]
    is_xml = False

    def default_parser(self, encoding):
        # Return the class itself; parser_for() instantiates it.
        return etree.HTMLParser

    def feed(self, markup):
        """Parse `markup` in one shot with lxml's HTML parser."""
        encoding = self.soup.original_encoding
        try:
            parser = self.parser_for(encoding)
            self.parser = parser
            parser.feed(markup)
            parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(str(e))

    def test_fragment_to_document(self, fragment):
        """See `TreeBuilder`."""
        return '<html><body>%s</body></html>' % fragment
| {
"repo_name": "hentaiPanda/bgm_tenkou",
"path": "bs4/builder/_lxml.py",
"copies": "1",
"size": "8896",
"license": "mit",
"hash": -3668046826124392400,
"line_mean": 36.1802575107,
"line_max": 85,
"alpha_frac": 0.5948741007,
"autogenerated": false,
"ratio": 4.310077519379845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5404951620079844,
"avg_score": null,
"num_lines": null
} |
# Public submodules re-exported by the ``spotify`` package.
__all__ = [
    "utils",
    "album", "albumbrowse", "artist", "artistbrowse",
    "image", "inbox", "link", "localtrack",
    "playlist", "playlistcontainer", "search",
    "session", "toplistbrowse", "track", "user",
]
import _spotify
import threading
from spotify.utils.decorators import synchronized
def handle_sp_error(errcode):
    """Raise LibSpotifyError for a non-zero libspotify error code.

    A zero code means success and returns silently.
    """
    if errcode == 0:
        return
    iface = _spotify.SpotifyInterface()
    raise LibSpotifyError(iface.error_message(errcode))
def build_id():
    """Return the libspotify build id reported by the native library."""
    return _spotify.SpotifyInterface().build_id()
class LibSpotifyError(Exception):
    """Base error for everything raised by this libspotify binding."""
class DuplicateCallbackError(LibSpotifyError):
    """Raised when the same callbacks object is registered twice."""
class UnknownCallbackError(LibSpotifyError):
    """Raised when removing a callbacks object that was never added."""
class ErrorType:
    """Numeric error codes mirroring libspotify's ``sp_error`` enum.

    Gaps in the numbering (4, 28-30, 37-38) are values unused here --
    presumably reserved or removed upstream; verify against the
    libspotify headers before relying on them.
    """
    Ok = 0
    BadApiVersion = 1
    ApiInitializationFailed = 2
    TrackNotPlayable = 3
    BadApplicationKey = 5
    BadUsernameOrPassword = 6
    UserBanned = 7
    UnableToContactServer = 8
    ClientTooOld = 9
    OtherPermanent = 10
    BadUserAgent = 11
    MissingCallback = 12
    InvalidIndata = 13
    IndexOutOfRange = 14
    UserNeedsPremium = 15
    OtherTransient = 16
    IsLoading = 17
    NoStreamAvailable = 18
    PermissionDenied = 19
    InboxIsFull = 20
    NoCache = 21
    NoSuchUser = 22
    NoCredentials = 23
    NetworkDisabled = 24
    InvalidDeviceId = 25
    CantOpenTraceFile = 26
    ApplicationBanned = 27
    OfflineTooManyTracks = 31
    OfflineDiskCache = 32
    OfflineExpired = 33
    OfflineNotAllowed = 34
    OfflineLicenseLost = 35
    OfflineLicenseError = 36
    LastfmAuthError = 39
    InvalidArgument = 40
    SystemFailure = 41
class SampleType:
    """Audio sample formats delivered by libspotify."""
    Int16NativeEndian = 0
class ConnectionRules:
    """Bitmask flags controlling when the client may use the network."""
    Network = 0x1
    NetworkIfRoaming = 0x2
    AllowSyncOverMobile = 0x4
    AllowSyncOverWifi = 0x8
class ConnectionType:
    """Kinds of network connection the client can report."""
    Unknown = 0
    Disconnected = 1
    Mobile = 2
    MobileRoaming = 3
    Wifi = 4
    Wired = 5
class ConnectionState:
    """Login/connection states of a session."""
    LoggedOut = 0
    LoggedIn = 1
    Disconnected = 2
    Undefined = 3
    Offline = 4
class Bitrate:
    """Streaming bitrate choices (values are enum codes, not kbps)."""
    Rate160k = 0
    Rate320k = 1
    Rate96k = 2
class SocialProvider:
    """Social networks that scrobbling settings can target."""
    Spotify = 0
    Facebook = 1
    Lastfm = 2
class ScrobblingState:
    """Per-provider scrobbling switch states."""
    UseGlobalSetting = 0
    LocalEnabled = 1
    LocalDisabled = 2
    GlobalEnabled = 3
    GlobalDisabled = 4
class MainLoop:
    """Drives ``session.process_events()`` until quit() is requested."""

    __notify_flag = None
    __quit_flag = None
    __quit_test = None

    def __init__(self):
        self.__notify_flag = threading.Event()
        self.__quit_flag = threading.Event()
        # Event.is_set() appeared in Python 2.6; fall back to the
        # camelCase alias on older interpreters.
        self.__quit_test = (getattr(self.__quit_flag, 'is_set', None)
                            or self.__quit_flag.isSet)

    def loop(self, session):
        """Block, processing session events, until quit() is called."""
        timeout = None
        while not self.__quit_test():
            self.__notify_flag.wait(timeout)
            self.__notify_flag.clear()
            timeout = session.process_events()

    def notify(self):
        """Wake the loop so it processes pending events."""
        self.__notify_flag.set()

    def quit(self):
        """Ask the loop to exit, waking it if it is blocked."""
        self.__quit_flag.set()
        self.notify()
class CallbackItem:
    """Tiny attribute bag built from keyword arguments."""

    def __init__(self, **args):
        for key, value in args.items():
            setattr(self, key, value)
class CallbackQueueManager:
    """Collects (condition, callback) pairs and fires each callback once
    its condition becomes true."""

    _callbacks = None

    def __init__(self):
        self._callbacks = []

    def add_callback(self, condition, callback, *args):
        """Register ``callback(*args)`` to run once ``condition()`` is true."""
        self._callbacks.append(
            CallbackItem(
                condition=condition,
                callback=callback,
                args=args,
            )
        )

    def process_callbacks(self):
        """Run and discard every callback whose condition is now met.

        Fix: iterate over a snapshot of the list.  The original removed
        items from ``self._callbacks`` while iterating it, which made the
        iterator skip the entry following each fired callback.
        """
        for item in list(self._callbacks):
            if item.condition():
                self._callbacks.remove(item)
                item.callback(*item.args)
class BulkConditionChecker:
    """Waits until every registered zero-argument condition callable
    evaluates true, then fires an event and the complete() hook."""

    _conditions = None
    _event = None

    def __init__(self):
        self._conditions = []
        self._event = threading.Event()

    @synchronized
    def add_condition(self, condition):
        """Register a zero-argument callable that must become true."""
        self._conditions.append(condition)

    @synchronized
    def check_conditions(self):
        """Drop met conditions; return True (and fire) when none remain."""
        # Generate a new list with false conditions
        self._conditions = [item for item in self._conditions if not item()]
        # If list size reaches zero all conditions have been met
        if len(self._conditions) == 0:
            self._complete()
            return True
        else:
            return False

    def _complete(self):
        self._event.set()
        self.complete()

    def complete(self):
        """Subclass hook invoked once all conditions are met."""
        pass

    def try_complete_wait(self, timeout=None):
        """Wait up to `timeout`; return True if everything completed."""
        # Clear the event first, so we make a "clean" check
        self._event.clear()
        # Check conditions in case they have already been met, then wait
        self.check_conditions()
        self._event.wait(timeout)
        # Fix: use is_set() -- isSet() is a deprecated camelCase alias
        # (and the rest of this module already prefers is_set()).
        return self._event.is_set()

    def complete_wait(self, timeout=None):
        """Like try_complete_wait() but raise RuntimeError on timeout."""
        # Fail if the call returns due to a timeout
        if not self.try_complete_wait(timeout):
            raise RuntimeError('Timed out while waiting for an event.')
class CallbackManager:
    """Registry of callback objects; unknown attribute access broadcasts
    the call to every registered callbacks object."""

    __callbacks = None

    def __init__(self):
        self.__callbacks = {}

    def _create_class(self, callback):
        """Subclass hook: wrap `callback` in a native proxy (or None)."""
        return None

    def add_callbacks(self, callbacks):
        """Register a callbacks object; each object may be added once."""
        cb_id = id(callbacks)
        if cb_id in self.__callbacks:
            raise DuplicateCallbackError()
        self.__callbacks[cb_id] = CallbackItem(
            callbacks=callbacks,
            custom_class=self._create_class(callbacks),
        )

    def remove_callbacks(self, callbacks):
        """Unregister a previously added callbacks object."""
        cb_id = id(callbacks)
        if cb_id not in self.__callbacks:
            raise UnknownCallbackError()
        del self.__callbacks[cb_id]

    def remove_all_callbacks(self):
        """Unregister every callbacks object.

        Fix: iterate over a snapshot of the values.  The original deleted
        dict entries while iterating ``.values()``, which raises
        ``RuntimeError: dictionary changed size during iteration`` on
        Python 3.
        """
        for item in list(self.__callbacks.values()):
            self.remove_callbacks(item.callbacks)

    def _call_funcs(self, name, *args, **kwargs):
        # Snapshot here too, in case a callback unregisters itself.
        for item in list(self.__callbacks.values()):
            f = getattr(item.callbacks, name)
            f(*args, **kwargs)

    def __getattr__(self, name):
        # Any unknown attribute becomes a broadcast to all callbacks.
        return lambda *args, **kwargs: self._call_funcs(name, *args, **kwargs)

    def __del__(self):
        self.remove_all_callbacks()
| {
"repo_name": "mazkolain/pyspotify-ctypes",
"path": "src/spotify/__init__.py",
"copies": "1",
"size": "6824",
"license": "bsd-3-clause",
"hash": -2044616974678083600,
"line_mean": 19.9421221865,
"line_max": 78,
"alpha_frac": 0.5496776084,
"autogenerated": false,
"ratio": 4.289126335637963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5338803944037963,
"avg_score": null,
"num_lines": null
} |
"""All route definitions for the beardo control server
"""
# third-party imports
from nacelle.core.routes import MultiPrefixRoute
from webapp2 import Route
from webapp2_extras.routes import RedirectRoute
# URL routing table consumed by the WSGI application.  Handlers are given
# as dotted-path strings and resolved lazily by webapp2.
ROUTES = [
    # Dashboard routes
    MultiPrefixRoute(
        handler_pfx='app.handlers.template.dashboard.',
        routes=[
            Route(r'/', 'login', name='login'),
            RedirectRoute(r'/_/', 'dashboard', name='dashboard', strict_slash=True),
        ],
    ),
    # Project management routes
    MultiPrefixRoute(
        handler_pfx='app.handlers.template.projects.',
        path_pfx='/_/projects',
        routes=[
            RedirectRoute(r'/', 'projects_list', name='projects-list', strict_slash=True),
            RedirectRoute(r'/new/', 'projects_add', name='projects-add', strict_slash=True),
            RedirectRoute(r'/<project_id>/', 'projects_edit', name='projects-edit', strict_slash=True),
            RedirectRoute(r'/<project_id>/delete/', 'projects_delete', name='projects-delete', strict_slash=True),
            RedirectRoute(r'/<project_id>/<build_id>/', 'build_view', name='build-view', strict_slash=True),
        ],
    ),
    # User/key management routes
    MultiPrefixRoute(
        handler_pfx='app.handlers.template.users.',
        path_pfx='/_/users',
        routes=[
            RedirectRoute(r'/', 'users_list', name='users-list', strict_slash=True),
            RedirectRoute(r'/<user_id>/', 'users_profile', name='users-profile', strict_slash=True),
            RedirectRoute(r'/<user_id>/add-key/', 'ssh_keys_add', name='ssh-keys-add', strict_slash=True),
            RedirectRoute(r'/<user_id>/delete-key/<ssh_key_id>/', 'ssh_keys_delete', name='ssh-keys-delete', strict_slash=True),
            RedirectRoute(r'/<user_id>/delete/', 'users_delete', name='users-delete', strict_slash=True),
        ],
    ),
    # Task routes (cron/task-queue endpoints for syncing with gitlab)
    Route(r'/_tasks/gitlab/sync/projects/', 'app.handlers.tasks.gitlab.sync_projects', name='tasks-gitlab-sync-projects'),
    Route(r'/_tasks/gitlab/sync/ssh_keys/<user_id>/', 'app.handlers.tasks.gitlab.sync_ssh_keys', name='tasks-gitlab-sync-ssh-keys'),
    Route(r'/_tasks/gitlab/sync/users/', 'app.handlers.tasks.gitlab.sync_users', name='tasks-gitlab-sync-users'),
    # Webhook routes
    MultiPrefixRoute(
        handler_pfx='app.handlers.webhooks.',
        path_pfx='/_webhooks',
        routes=[
            Route(r'/push/<project_id>', 'push_hook', name='push-hook'),
        ],
    ),
    # Build queue routes
    Route(r'/_build/queue/', 'app.handlers.queue.build_queue', name='build-queue'),
]
| {
"repo_name": "paddycarey/beardo-control",
"path": "app/app/routes.py",
"copies": "1",
"size": "2613",
"license": "mit",
"hash": 1081390550391589000,
"line_mean": 36.8695652174,
"line_max": 132,
"alpha_frac": 0.6215078454,
"autogenerated": false,
"ratio": 3.569672131147541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9680635432025478,
"avg_score": 0.002108908904412477,
"num_lines": 69
} |
"""All routes for rendered views goes here."""
from urlparse import urljoin
from PIL import Image, ImageOps
from passlib.hash import bcrypt
from unidecode import unidecode
from flask import (render_template, redirect, request, url_for, flash, session,
abort, g)
from flask.ext.login import (login_user, logout_user, login_required,
current_user)
import os.path
import uuid
import datetime
from app import APP, DB, forms, models, auth, ct_connect, mailing
def make_external(url):
    """Turn a site-relative URL into an absolute one for the current host."""
    root = request.url_root
    return urljoin(root, url)
def redirect_url(default='index'):
    """Prefer the HTTP referrer; fall back to the named default endpoint."""
    referrer = request.referrer
    if referrer:
        return referrer
    return url_for(default)
def image_resize(in_file, out_file, size=800):
    """Scale the image in `in_file` to fit a size x size box, keeping the
    aspect ratio, and save it to `out_file`."""
    picture = Image.open(in_file)
    picture.thumbnail((size, size), Image.ANTIALIAS)
    picture.save(out_file)
def create_thumbnail(in_file, out_file):
    """Create a 150x150 centre-cropped thumbnail of `in_file`."""
    thumb_size = (150, 150)
    source = Image.open(in_file)
    ImageOps.fit(source, thumb_size, Image.ANTIALIAS).save(out_file)
def save_image(image, request_path, user_id):
    """Resize `image`, create its thumbnail and record it in the db.

    Returns the generated image UUID on success, or ``None`` on any
    failure -- callers treat ``None`` as "no image stored".
    """
    try:
        image_uuid = str(uuid.uuid4())
        # resize image and save it to upload folder
        image_resize(image,
                     os.path.join(APP.root_path, APP.config['UPLOAD_FOLDER'],
                                  image_uuid + '.jpg'),
                     size=800)
        # create thumbnail
        create_thumbnail(os.path.join(APP.root_path,
                                      APP.config['UPLOAD_FOLDER'],
                                      image_uuid + '.jpg'),
                         os.path.join(APP.root_path,
                                      APP.config['UPLOAD_FOLDER'],
                                      image_uuid + '-thumb.jpg'))
        # make sure the uploader has a UserMetadata row
        user_metadata = models.get_user_metadata(user_id)
        if not user_metadata:
            metadata = models.UserMetadata(user_id)
            DB.session.add(metadata)
        # generate image db entry
        image = models.Image(uuid=image_uuid,
                             upload_date=datetime.datetime.utcnow(),
                             upload_to=request_path,
                             user_id=user_id)
        DB.session.add(image)
        DB.session.commit()
        return image_uuid
    except Exception:
        # Fix: was a bare ``except:``; keep the best-effort contract but
        # stop swallowing SystemExit/KeyboardInterrupt.
        return None
def get_recipients(profile_or_group, id):
    """Build the email recipient list for a profile or a group mail."""
    with ct_connect.session_scope() as ct_session:
        if 'profile' in profile_or_group:
            # a single person's address
            person = ct_connect.get_person_from_id(ct_session, id)[0]
            return [person.email]
        if 'group' in profile_or_group:
            # every head of the group
            heads = ct_connect.get_group_heads(ct_session, id)
            return [head.email for head in heads]
@APP.errorhandler(500)
def internal_error(error):
    """On a server error, roll back the db session and force a re-login."""
    DB.session.rollback()
    logout_user()
    return redirect(url_for('login'))
@APP.before_request
def before_request():
    """Attach a fresh SearchForm to ``g`` before every request."""
    g.search_form = forms.SearchForm()
@APP.route('/')
@login_required
def index():
    """Render the landing page with the newest front page entry, if any."""
    latest = models.FrontPage.query.all()[-1:]
    if not latest:
        return render_template('index.html', frontpage=None)
    return render_template('index.html', frontpage=latest[-1])
@APP.route('/edit', methods=['GET', 'POST'])
@login_required
@auth.valid_groups_and_users(users=[163], groups=[1])
def index_edit():
    """Frontpage edit form: update the four link/image slots."""
    frontpage = models.FrontPage.query.all()[-1:]
    if not frontpage:
        # No frontpage yet: create an empty one so the form can prefill.
        new_frontpage = models.FrontPage()
        DB.session.add(new_frontpage)
        frontpage = models.FrontPage.query.all()[-1:]
    # prefill form with the stored links
    form = forms.EditIndexForm(
        first_row_link=frontpage[0].first_row_link,
        second_row_link=frontpage[0].second_row_link,
        third_row_left_link=frontpage[0].third_row_left_link,
        third_row_right_link=frontpage[0].third_row_right_link)
    # on submit
    if form.validate_on_submit():
        try:
            frontpage = models.FrontPage(
                first_row_link=form.first_row_link.data,
                second_row_link=form.second_row_link.data,
                third_row_left_link=form.third_row_left_link.data,
                third_row_right_link=form.third_row_right_link.data)
            # Handle the four uploaded images uniformly (was four
            # copy-pasted stanzas).
            for slot in ('first_row', 'second_row',
                         'third_row_left', 'third_row_right'):
                upload = getattr(form, slot + '_image').data
                upload.stream.seek(0)
                image = save_image(image=upload.stream,
                                   request_path=request.path,
                                   user_id=auth.active_user()['id'])
                if image:
                    setattr(frontpage, slot + '_image', image)
            DB.session.add(frontpage)
            DB.session.commit()
            flash('Index veraendert!', 'success')
            return redirect(url_for('index'))
        except Exception:
            # Fix: was a bare ``except:``; keep the best-effort behaviour
            # but stop swallowing SystemExit/KeyboardInterrupt.
            flash('Es ist ein Fehler aufgetreten!', 'danger')
            return redirect(url_for('index_edit'))
    return render_template('index_edit.html', form=form)
@APP.route('/login', methods=['GET', 'POST'])
def login():
    """Login form.

    Validates the churchtools credentials, stores the list of valid
    user records in the session and marks the first one active.
    """
    form = forms.LoginForm()
    if form.validate_on_submit():
        email = form.email.data
        password = form.password.data
        # authenticate against churchtools
        user_obj = auth.CTUser(uid=email, password=password)
        user = user_obj.get_user()
        if user:
            valid_user = auth.get_valid_users(user, password)
        else:
            valid_user = None
        if valid_user and user.is_active():
            if login_user(user, remember=True):
                # store valid user in session
                session['user'] = valid_user
                # set activate to True on the first user
                session['user'][0]['active'] = True
                flash('Erfolgreich eingeloggt!', 'success')
                return redirect(url_for('index'))
            else:
                flash('Konnte nicht eingeloggt werden!', 'danger')
                return redirect(url_for('login'))
        else:
            flash('Konnte nicht eingeloggt werden!', 'danger')
            return redirect(url_for('login'))
    return render_template('login.html', form=form)
@APP.route('/logout')
def logout():
    """Log the current user out and send them back to the login page."""
    logout_user()
    flash('Erfolgreich ausgeloggt!', 'success')
    return redirect(url_for('login'))
@APP.route('/news')
def news():
    """News blog overview (not implemented yet)."""
    pass
@APP.route('/groups')
@login_required
def groups():
    """List every active group together with its stored metadata."""
    with ct_connect.session_scope() as ct_session:
        active = ct_connect.get_active_groups(ct_session)
        metadata = [models.get_group_metadata(grp.id) for grp in active]
        return render_template('groups.html',
                               groups=enumerate(active),
                               groups_metadata=metadata)
@APP.route('/group/<int:id>', methods=['GET', 'POST'])
@login_required
def group(id):
    """Single group view; group heads may edit metadata and details."""
    with ct_connect.session_scope() as ct_session:
        group = ct_connect.get_group(ct_session, id)
        if not group:
            abort(404)
        group_metadata = models.get_group_metadata(id)
        group_heads = ct_connect.get_group_heads(ct_session, id)
        # only heads of this group may edit it
        group_edit = False
        if current_user.get_id() in [i.email for i in group_heads]:
            group_edit = True
        # if someone is trying to make a POST request and group_edit is
        # False abort with a 403 status
        if request.method == 'POST' and group_edit is False:
            abort(403)
        # set form to None so there is something to send to the template
        # if the logged in user is not allowed to edit the group
        form = None
        if group_edit:
            if not group_metadata:
                group_metadata = models.GroupMetadata(ct_id=id)
                DB.session.add(group_metadata)
            # prefill form with db data
            form = forms.EditGroupForm(description=group_metadata.description,
                                       where=group.treffpunkt,
                                       when=group.treffzeit,
                                       audience=group.zielgruppe)
            # clicked submit
            if form.validate_on_submit():
                try:
                    # metadata
                    group_metadata.description = form.description.data
                    # save image and set the image db true
                    form.group_image.data.stream.seek(0)
                    group_image = save_image(form.group_image.data.stream,
                                             request_path=request.path,
                                             user_id=auth.active_user()['id'])
                    if group_image:
                        group_metadata.avatar_id = group_image
                    # save to metadata db
                    DB.session.commit()
                    # churchtools
                    group.treffpunkt = form.where.data
                    group.treffzeit = form.when.data
                    group.zielgruppe = form.audience.data
                    # save data to churchtools db
                    ct_session.add(group)
                    ct_session.commit()
                    flash('Gruppe geaendert!', 'success')
                    return redirect(url_for('group', id=id))
                except Exception:
                    # Fix: was a bare ``except:``; keep the best-effort
                    # behaviour without catching SystemExit and friends.
                    flash('Fehler aufgetreten!', 'danger')
                    return redirect(url_for('group', id=id))
        return render_template('group.html',
                               group=group,
                               group_metadata=group_metadata,
                               group_heads=group_heads,
                               group_edit=group_edit,
                               form=form,
                               mail_form=forms.MailForm())
@APP.route('/prayer')
@login_required
def prayer():
    """Display one randomly chosen prayer request."""
    return render_template('prayer.html',
                           random_prayer=models.get_random_prayer())
@APP.route('/prayer/add', methods=['GET', 'POST'])
@login_required
def prayer_add():
    """Form for submitting a new prayer request."""
    form = forms.AddPrayerForm(active=True)
    if not form.validate_on_submit():
        return render_template('prayer_add.html', form=form)
    # make sure a UserMetadata row exists for the author
    user_metadata = models.get_user_metadata(auth.active_user()['id'])
    if not user_metadata:
        metadata = models.UserMetadata(auth.active_user()['id'])
        DB.session.add(metadata)
        DB.session.commit()
    new_prayer = models.Prayer(user_id=auth.active_user()['id'],
                               name=form.name.data,
                               active=form.active.data,
                               pub_date=datetime.datetime.utcnow(),
                               body=form.body.data)
    DB.session.add(new_prayer)
    DB.session.commit()
    flash('Gebetsanliegen abgeschickt!', 'success')
    return redirect(url_for('prayer_mine'))
@APP.route('/prayer/mine', methods=['GET', 'POST'])
@login_required
def prayer_mine():
    """Show the current user's prayers, each with an inline edit form."""
    active_user = auth.active_user()
    # getting all own prayers
    prayers = models.get_own_prayers(active_user['id'])
    # one prefilled edit form per prayer, keyed by prayer id
    edit_forms = {}
    for prayer in prayers:
        edit_forms[prayer.id] = forms.AddPrayerForm(prefix=str(prayer.id),
                                                    body=prayer.body,
                                                    name=prayer.name,
                                                    active=prayer.active)
    if request.method == 'POST':
        try:
            # the form field names are prefixed "<prayer_id>-"; extract it
            prayer_id = int(list(request.form)[0].split('-')[0])
            # getting the right form out of the prayers dict
            edit_prayer_form = edit_forms[prayer_id]
            if edit_prayer_form.validate():
                prayer = models.get_prayer(prayer_id)
                prayer.body = edit_prayer_form.body.data
                prayer.name = edit_prayer_form.name.data
                prayer.active = edit_prayer_form.active.data
                DB.session.commit()
                flash('Gebetsanliegen veraendert!', 'success')
                return redirect(url_for('prayer_mine'))
        except Exception:
            # Fix: was a bare ``except:``; don't swallow SystemExit and
            # KeyboardInterrupt.
            flash('Es ist ein Fehler aufgetreten!', 'danger')
            return redirect(url_for('prayer_mine'))
    return render_template('prayer_mine.html',
                           prayers=prayers,
                           edit_forms=edit_forms)
@APP.route('/prayer/<int:id>/del')
@login_required
@auth.prayer_owner
def prayer_del(id):
    """Delete one of the current user's prayer requests."""
    prayer = models.get_prayer(id)
    try:
        DB.session.delete(prayer)
        DB.session.commit()
        flash('Gebetsanliegen entfernt!', 'success')
        return redirect(url_for('prayer_mine'))
    except Exception:
        # Fix: was a bare ``except:``
        flash('Gebetsanliegen konnte nicht entfernt werden!', 'danger')
        return redirect(url_for('prayer_mine'))
@APP.route('/profile/<int:id>', methods=['GET', 'POST'])
@login_required
def profile(id):
    """User profile view; a user may edit their own profile."""
    with ct_connect.session_scope() as ct_session:
        user = ct_connect.get_person_from_id(ct_session, id)
        if not user:
            abort(404)
        # getting metadata
        user_metadata = models.get_user_metadata(id)
        # the profile is editable when it belongs to one of the session
        # users; mark that session user active (dead ``else: pass`` removed)
        user_edit = False
        for session_user in session['user']:
            if session_user['id'] == id:
                user_edit = True
                session_user['active'] = True
        # if someone is trying to make a POST request and user_edit is
        # False abort with a 403 status
        if request.method == 'POST' and user_edit is False:
            abort(403)
        # set form to None so there is something to send to the template
        # if the logged in user is not allowed to edit this profile
        form = None
        # this is for editing the user's own profile
        if user_edit:
            # set other users in session['user'] inactive
            for session_user in session['user']:
                if session_user['id'] != id:
                    session_user['active'] = False
            # if there is no user_metadata db entry define it
            if not user_metadata:
                user_metadata = models.UserMetadata(ct_id=id)
                DB.session.add(user_metadata)
            # try to prefill form
            form = forms.EditProfileForm(street=user[0].strasse,
                                         postal_code=user[0].plz,
                                         city=user[0].ort,
                                         bio=user_metadata.bio,
                                         twitter=user_metadata.twitter,
                                         facebook=user_metadata.facebook)
            # clicked submit
            if form.validate_on_submit():
                try:
                    # save image and set the image db true
                    form.user_image.data.stream.seek(0)
                    # metadata
                    user_image = save_image(form.user_image.data.stream,
                                            request_path=request.path,
                                            user_id=auth.active_user()['id'])
                    if user_image:
                        user_metadata.avatar_id = user_image
                    user_metadata.bio = form.bio.data
                    user_metadata.twitter = form.twitter.data
                    user_metadata.facebook = form.facebook.data
                    # save metadata to metadata db
                    DB.session.add(user_metadata)
                    DB.session.commit()
                    # churchtools
                    user[0].strasse = form.street.data
                    user[0].plz = form.postal_code.data
                    user[0].ort = form.city.data
                    # password is only changed when a new one was entered
                    if form.password.data:
                        user[0].password = bcrypt.encrypt(form.password.data)
                    # save data to churchtools db
                    ct_session.add(user[0])
                    ct_session.commit()
                    flash('Profil geaendert!', 'success')
                    return redirect(url_for('profile', id=id))
                except Exception:
                    # Fix: was a bare ``except:``
                    flash('Es ist ein Fehler aufgetreten!', 'danger')
                    return redirect(url_for('profile', id=id))
        return render_template('profile.html',
                               user=user[0],
                               user_metadata=user_metadata,
                               user_edit=user_edit,
                               form=form,
                               mail_form=forms.MailForm())
@APP.route('/mail/<profile_or_group>/<int:id>', methods=['GET', 'POST'])
@login_required
def mail(profile_or_group, id):
    """Send an email to a user or to a group's leaders."""
    if profile_or_group not in ('profile', 'group'):
        abort(404)
    form = forms.MailForm()
    if form.validate_on_submit():
        try:
            # build the sender tuple from the active session user
            active_user = auth.active_user()
            sender = ('{} {}'.format(unidecode(active_user['vorname']),
                                     unidecode(active_user['name'])),
                      active_user['email'])
            recipients = get_recipients(profile_or_group, id)
            mailing.send_email(sender=sender,
                               recipients=recipients,
                               subject=form.subject.data,
                               body=form.body.data)
            flash('Email gesendet!', 'success')
            return redirect(url_for(profile_or_group, id=id))
        except Exception:
            # Fix: was a bare ``except:``
            flash('Es ist ein Fehler aufgetreten!', 'danger')
            return redirect(url_for(profile_or_group, id=id))
    return render_template('mail.html', form=form)
@APP.route('/whatsup', methods=['GET', 'POST'])
@login_required
def whatsup_overview():
    """Overview with some metric about how many upvotes a post has.

    GET renders the overview; POST (via the inline AddWhatsUp form)
    creates a new post and redirects back.
    """
    posts = models.get_whatsup_overview()
    form = forms.AddWhatsUp()
    if form.validate_on_submit():
        user_id = auth.active_user()['id']
        # check if user_metadata exists; create the row on first interaction
        user_metadata = models.get_user_metadata(user_id)
        if not user_metadata:
            # NOTE(review): constructed positionally here but with the
            # ``ct_id=`` keyword elsewhere in this module -- presumably the
            # first positional parameter of UserMetadata is ct_id; confirm.
            metadata = models.UserMetadata(user_id)
            DB.session.add(metadata)
            DB.session.commit()
        # create post; ``active`` records the last-activity timestamp
        new_post = models.WhatsUp(user_id=user_id,
                                  pub_date=datetime.datetime.utcnow(),
                                  active=datetime.datetime.utcnow(),
                                  subject=form.subject.data,
                                  body=form.body.data)
        DB.session.add(new_post)
        DB.session.commit()
        flash('Post abgeschickt!', 'success')
        return redirect(redirect_url(default='whatsup_overview'))
    # generate a feed auth token
    token = auth.generate_feed_auth(auth.active_user())
    # ct_data needs a DB.session to access the ct database
    with ct_connect.session_scope() as ct_session:
        return render_template('whatsup_overview.html',
                               posts=posts,
                               form=form,
                               ct_session=ct_session,
                               token=token)
@APP.route('/whatsup/new', methods=['GET', 'POST'])
@login_required
def whatsup_overview_new():
    """Ignore all upvotes. Just show newest 20 posts.

    POST (via the inline AddWhatsUp form) creates a new post and
    redirects back to this view.
    """
    posts = models.get_latest_whatsup_posts(20)
    form = forms.AddWhatsUp()
    if form.validate_on_submit():
        try:
            user_id = auth.active_user()['id']
            # check if user_metadata exists; create the row on first use
            user_metadata = models.get_user_metadata(user_id)
            if not user_metadata:
                metadata = models.UserMetadata(user_id)
                DB.session.add(metadata)
                DB.session.commit()
            # create post; ``active`` records the last-activity timestamp
            new_post = models.WhatsUp(user_id=user_id,
                                      pub_date=datetime.datetime.utcnow(),
                                      active=datetime.datetime.utcnow(),
                                      subject=form.subject.data,
                                      body=form.body.data)
            DB.session.add(new_post)
            DB.session.commit()
            flash('Post abgeschickt!', 'success')
            return redirect(redirect_url(default='whatsup_overview_new'))
        # narrowed from a bare ``except:``; keeps the best-effort
        # flash-and-redirect behaviour for real errors only
        except Exception:
            flash('Fehler aufgetreten!', 'danger')
            return redirect(redirect_url(default='whatsup_overview_new'))
    # generate a feed auth token
    token = auth.generate_feed_auth(auth.active_user())
    # ct_data needs a DB.session to access the ct database
    with ct_connect.session_scope() as ct_session:
        return render_template('whatsup_overview_new.html',
                               posts=posts,
                               form=form,
                               ct_session=ct_session,
                               token=token)
@APP.route('/whatsup/<int:id>/upvote')
@login_required
def whatsup_upvote(id):
    """Route to upvote a whatsup post."""
    post = models.get_whatsup_post(id)
    # if already voted just redirect to overview
    if post.did_i_upvote():
        return redirect(redirect_url(default='whatsup_overview'))
    user_id = auth.active_user()['id']
    # check if user_metadata exists; create the row on first interaction
    user_metadata = models.get_user_metadata(user_id)
    if not user_metadata:
        metadata = models.UserMetadata(user_id)
        DB.session.add(metadata)
        DB.session.commit()
    # create upvote
    upvote = models.WhatsUpUpvote(post_id=id, user_id=user_id)
    # set active to now (presumably used to order posts in the overview --
    # confirm against get_whatsup_overview)
    post.active = datetime.datetime.utcnow()
    # write to db
    DB.session.add(upvote)
    DB.session.add(post)
    DB.session.commit()
    return redirect(redirect_url(default='whatsup_overview'))
@APP.route('/whatsup/<int:id>', methods=['GET', 'POST'])
@login_required
def whatsup_post(id):
    """Add a whatup post.

    GET shows the post with its comments; POST adds a comment and
    notifies the post owner by mail.
    """
    with ct_connect.session_scope() as ct_session:
        post = models.get_whatsup_post(id)
        form = forms.AddWhatsUpComment()
        if form.validate_on_submit():
            try:
                active_user = auth.active_user()
                user_id = active_user['id']
                # check if user_metadata exists; create the row on first use
                user_metadata = models.get_user_metadata(user_id)
                if not user_metadata:
                    metadata = models.UserMetadata(user_id)
                    DB.session.add(metadata)
                    DB.session.commit()
                # add comment
                comment = models.WhatsUpComment(
                    post_id=id,
                    user_id=user_id,
                    pub_date=datetime.datetime.utcnow(),
                    body=form.body.data)
                DB.session.add(comment)
                DB.session.commit()
                # send mail to post owner
                sender = ('{} {}'.format(unidecode(active_user['vorname']),
                                         unidecode(active_user['name'])),
                          active_user['email'])
                recipients = [post.user.ct_data(ct_session).email]
                # NOTE(review): the encode('utf-8')/.decode('utf-8')
                # round-trip is a Python 2 idiom and would raise on
                # Python 3 -- confirm the target interpreter.
                body = '{} {} hat geschrieben:\n\n{}\n\n{}'.format(
                    unidecode(active_user['vorname']),
                    unidecode(active_user['name']),
                    form.body.data.encode('utf-8'),
                    make_external('/whatsup/{}'.format(id))).decode('utf-8')
                # NOTE(review): called positionally with the subject first,
                # while mail() above passes sender= first by keyword --
                # verify mailing.send_email's parameter order.
                mailing.send_email('Kommentar in "{}"'.format(post.subject),
                                   sender, recipients, body)
                flash('Kommentar abgeschickt!', 'success')
                return redirect(url_for('whatsup_post', id=id))
            except:
                flash('Fehler aufgetreten!', 'danger')
                return redirect(url_for('whatsup_post', id=id))
        return render_template('whatsup_post.html',
                               post=post,
                               form=form,
                               ct_session=ct_session)
@APP.route('/whatsup/mine', methods=['GET', 'POST'])
@login_required
def whatsup_mine():
    """Own whatsup posts.

    Renders the user's posts, each with a prefilled edit form; a POST
    submits one of those forms and updates the corresponding post.
    """
    active_user = auth.active_user()
    # getting all own posts
    posts = models.get_own_whatsup_posts(active_user['id'])
    # one prefilled edit form per post, keyed by post id; the prefix makes
    # the submitted field names unique per post ("<postid>-<field>")
    edit_forms = {}
    for post in posts:
        edit_forms[post.id] = forms.AddWhatsUp(prefix=str(post.id),
                                               subject=post.subject,
                                               body=post.body)
    if request.method == 'POST':
        # extract id out of the first submitted field name's prefix
        post_id = int(list(request.form)[0].split('-')[0])
        # getting right form out of post form dict
        edit_post_form = edit_forms[post_id]
        if edit_post_form.validate():
            try:
                # getting post
                post = models.get_whatsup_post(post_id)
                # changing db entry
                post.subject = edit_post_form.subject.data
                post.body = edit_post_form.body.data
                post.active = datetime.datetime.utcnow()
                # save to db
                DB.session.commit()
                flash('Post veraendert!', 'success')
                return redirect(url_for('whatsup_mine'))
            # narrowed from bare ``except:``
            except Exception:
                flash('Es ist ein Fehler aufgetreten!', 'danger')
                # bugfix: previously redirected to the nonexistent endpoint
                # 'post_mine', which made the error path itself blow up
                return redirect(url_for('whatsup_mine'))
    return render_template('whatsup_mine.html',
                           posts=posts,
                           edit_forms=edit_forms)
@APP.route('/search', methods=['POST'])
@login_required
def search():
    """Accept the global search form and redirect to the results page."""
    search_form = g.search_form
    if search_form.validate_on_submit():
        # pass the raw query through the URL so results are bookmarkable
        return redirect(url_for('search_results',
                                query=search_form.search.data))
    # invalid or empty submission: bounce back to where the user came from
    return redirect(redirect_url())
@APP.route('/search_results/<query>')
@login_required
def search_results(query):
    """View for search results: churchtools persons plus whatsup matches."""
    with ct_connect.session_scope() as ct_session:
        matches = ct_connect.search_person(ct_session, query)
        # pair every churchtools person with the locally stored metadata
        persons = []
        for match in matches:
            persons.append((match, models.get_user_metadata(match.id)))
        posts = models.search_whatsup_posts(query)
        comments = models.search_whatsup_comments(query)
        return render_template('search.html',
                               query=query,
                               persons=persons,
                               posts=posts,
                               comments=comments,
                               ct_session=ct_session)
| {
"repo_name": "ecclesianuernberg/genesis",
"path": "app/views.py",
"copies": "1",
"size": "28509",
"license": "mit",
"hash": 7822432479122565000,
"line_mean": 33.0202863962,
"line_max": 79,
"alpha_frac": 0.5316215932,
"autogenerated": false,
"ratio": 4.119202427394885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150824020594885,
"avg_score": null,
"num_lines": null
} |
"""All routes regarding an user."""
from flask import jsonify, g
from webargs.flaskparser import use_args
from server import user_bp
from server.extensions import db
from server.helpers.decorators import login_exempt
from server.responses import created, conflict
from server.models.user import User
from server.schemas.user import UserSchema
from server.validation.user import user_creation_fields
@user_bp.route('/api/user/<uuid:user_id>', methods=['GET'])
def info(user_id):
    """Get the info about a specific user.

    NOTE(review): ``user_id`` from the URL is ignored -- the endpoint
    always serializes ``g.current_user``. Confirm whether a per-user
    lookup was intended here.
    """
    schema = UserSchema()
    return jsonify(schema.dump(g.current_user).data)
@user_bp.route('/api/user/register', methods=['POST'])
@use_args(user_creation_fields)
@login_exempt
def register(args):
    """Register a new user.

    Rejects the request with a 409 conflict if the nickname or the email
    is already taken; otherwise creates the user and returns 201.
    """
    # check each unique field in turn so the client gets a precise message
    duplicate_checks = (
        (User.nickname == args['nickname'], 'This nickname is already taken.'),
        (User.email == args['email'], 'This email is already taken.'),
    )
    for criterion, message in duplicate_checks:
        existing = db.session.query(User).filter(criterion).one_or_none()
        if existing is not None:
            return conflict(message)
    new_user = User(
        nickname=args['nickname'],
        email=args['email'],
        password=args['password'],
    )
    db.session.add(new_user)
    db.session.commit()
    return created()
| {
"repo_name": "Nukesor/spacesurvival",
"path": "server/api/user.py",
"copies": "1",
"size": "1342",
"license": "mit",
"hash": 3841675618763774000,
"line_mean": 25.84,
"line_max": 59,
"alpha_frac": 0.6691505216,
"autogenerated": false,
"ratio": 3.790960451977401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49601109735774007,
"avg_score": null,
"num_lines": null
} |
"""All routes regarding authentication."""
from datetime import datetime
from flask import jsonify
from sqlalchemy import or_
from webargs.flaskparser import use_args
from server import user_bp
from server.extensions import db
from server.models import User
from server.responses import bad_request
from server.schemas.user import UserSchema
from server.validation.user import login_fields
from server.helpers.decorators import login_exempt
@user_bp.route('/api/auth/login', methods=['POST'])
@login_exempt
@use_args(login_fields)
def login(args):
    """Endpoint for login.

    Check if we can login with the credentials. We try to get the
    user by searching email and nickname for the given identifier.
    """
    identifier = args['identifier']
    password = args['password']
    # Check if the user exists; the identifier may be a nickname or email
    user = db.session.query(User) \
        .filter(or_(User.nickname == identifier, User.email == identifier)) \
        .one_or_none()
    # Same message for unknown user and wrong password, so the endpoint
    # does not leak which accounts exist.
    if user is None:
        return bad_request('Unknown credentials or wrong password.')
    # Validate password
    valid_password = user.verify_password(password)
    if not valid_password:
        return bad_request('Unknown credentials or wrong password.')
    # Reuse a still-valid token; otherwise mint a fresh one
    if not user.has_valid_auth_token:
        user.generate_auth_token()
    user.last_login_at = datetime.utcnow()
    db.session.add(user)
    db.session.commit()
    schema = UserSchema()
    return jsonify(schema.dump(user).data)
| {
"repo_name": "Nukesor/spacesurvival",
"path": "server/api/auth.py",
"copies": "1",
"size": "1458",
"license": "mit",
"hash": -4216928518591554000,
"line_mean": 28.7551020408,
"line_max": 77,
"alpha_frac": 0.7146776406,
"autogenerated": false,
"ratio": 4.095505617977528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5310183258577528,
"avg_score": null,
"num_lines": null
} |
"""All routes regarding queues."""
from flask import g
from server import user_bp
from server.extensions import db
from server.responses import ok, bad_request
from server.schemas.queue import QueueSchema
from server.models import Pod, QueueEntry, Queue
@user_bp.route('/api/pod/<uuid:pod_id>/queue', methods=['GET'])
def pod_queue(pod_id):
    """Get the queue of the specified pod."""
    pod = db.session.query(Pod).get(pod_id)
    # robustness: Query.get() returns None for unknown ids, which
    # previously caused an AttributeError (HTTP 500) below
    if pod is None:
        return bad_request("Pod doesn't exist")
    schema = QueueSchema()
    return ok(schema.dump(pod.queue).data)
@user_bp.route('/api/pod/<uuid:pod_id>/queue/entry/<uuid:entry_id>', methods=['DELETE'])
def remove_queue_entry(pod_id, entry_id):
    """Remove a specific queue entry."""
    pod = db.session.query(Pod).get(pod_id)
    # robustness: Query.get() returns None for unknown ids, which
    # previously caused an AttributeError (HTTP 500) below
    if pod is None:
        return bad_request("Pod doesn't exist")
    if pod.user_id != g.current_user.id:
        return bad_request("Pod doesn't belong to current user.")
    # Look up the entry inside this pod's queue. The join scopes the entry
    # id to the pod, so entries of other pods cannot be deleted.
    # (A stray copy-pasted docstring string that sat here as a no-op
    # statement was removed.)
    queue_entry = db.session.query(QueueEntry) \
        .join(Queue) \
        .filter(Queue.pod == pod) \
        .filter(QueueEntry.id == entry_id) \
        .one_or_none()
    if queue_entry is None:
        return bad_request("Queue entry doesn't exist")
    db.session.delete(queue_entry)
    # advance the queue so the next entry starts processing
    pod.queue.next_entry()
    db.session.commit()
    return ok()
| {
"repo_name": "Nukesor/spacesurvival",
"path": "server/api/queue.py",
"copies": "1",
"size": "1242",
"license": "mit",
"hash": 1901580822291149600,
"line_mean": 29.2926829268,
"line_max": 88,
"alpha_frac": 0.6610305958,
"autogenerated": false,
"ratio": 3.4214876033057853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.95811479606843,
"avg_score": 0.00027404768429706766,
"num_lines": 41
} |
"""All ROX applications that can save documents should use drag-and-drop saving.
The document itself should use the Saveable mix-in class and override some of the
methods to actually do the save.
If you want to save a selection then you can create a new object specially for
the purpose and pass that to the SaveBox."""
import os, sys
import rox
from rox import alert, info, g, _, filer, escape
from rox import choices, get_local_path
# Shorthand for the GDK module exposed through rox's pygtk wrapper.
gdk = g.gdk

# Drag-target ids used with drag_source_set / drag_data_get below.
TARGET_XDS = 0  # X Direct Save (XDS) protocol target
TARGET_RAW = 1  # raw data transferred via the selection
def _write_xds_property(context, value):
    """Set the XdndDirectSave0 property on the drag source window, or
    delete it when value is falsy."""
    source = context.source_window
    if not value:
        source.property_delete('XdndDirectSave0')
        return
    source.property_change('XdndDirectSave0', 'text/plain', 8,
                           gdk.PROP_MODE_REPLACE,
                           value)
def _read_xds_property(context, delete):
    """Return the XdndDirectSave0 value from the drag source window
    (optionally deleting the property), or None when it is unset."""
    prop = context.source_window.property_get('XdndDirectSave0',
                                              'text/plain', delete)
    return prop[2] if prop else None
def image_for_type(type):
    'Search <Choices> for a suitable icon. Returns a pixbuf, or None.'
    from icon_theme import rox_theme
    media, subtype = type.split('/', 1)
    # Exact match first: MIME-icons/<media>_<subtype>.png
    path = choices.load('MIME-icons', media + '_' + subtype + '.png')
    if not path:
        # Then ask the icon theme: mime-<media>:<subtype>, then mime-<media>
        icon = 'mime-%s:%s' % (media, subtype)
        try:
            path = rox_theme.lookup_icon(icon, 48)
            if not path:
                icon = 'mime-%s' % media
                path = rox_theme.lookup_icon(icon, 48)
        except:
            print "Error loading MIME icon"
    if not path:
        # Finally fall back to the generic per-media icon
        path = choices.load('MIME-icons', media + '.png')
    if path:
        return gdk.pixbuf_new_from_file(path)
    else:
        return None
def _report_save_error():
    """Report an AbortSave nicely; anything else goes through
    rox.report_exception()."""
    exc_value = sys.exc_info()[1]
    if not isinstance(exc_value, AbortSave):
        rox.report_exception()
        return
    exc_value.show()
class AbortSave(Exception):
    """Cancel a save operation.

    When constructed with a message it is shown in a plain alert box
    (not the report_exception style). Pass None when the user has
    already been told about the problem.
    """

    def __init__(self, message):
        self.message = message

    def show(self):
        # None means the caller already displayed the error itself
        if not self.message:
            return
        rox.alert(self.message)
class Saveable:
    """This class describes the interface that an object must provide
    to work with the SaveBox/SaveArea widgets. Inherit from it if you
    want to save. All methods can be overridden, but normally only
    save_to_stream() needs to be. You can also set save_last_stat to
    the result of os.stat(filename) when loading a file to make ROX-Lib
    restore permissions and warn about other programs editing the file."""

    # os.stat() result of the loaded file, or None (see class docstring)
    save_last_stat = None

    def set_uri(self, uri):
        """When the data is safely saved somewhere this is called
        with its new name. Mark your data as unmodified and update
        the filename for next time. Saving to another application
        won't call this method. Default method does nothing."""
        pass

    def save_to_stream(self, stream):
        """Write the data to save to the stream. When saving to a
        local file, stream will be the actual file, otherwise it is a
        cStringIO object."""
        raise Exception('You forgot to write the save_to_stream() method...'
                        'silly programmer!')

    def save_to_file(self, path):
        """Write data to file. Raise an exception on error.
        The default creates a temporary file, uses save_to_stream() to
        write to it, then renames it over the original. If the temporary file
        can't be created, it writes directly over the original."""
        # Ensure the directory exists...
        dir = os.path.dirname(path)
        if not os.path.isdir(dir):
            from rox import fileutils
            try:
                fileutils.makedirs(dir)
            except OSError:
                raise AbortSave(None)  # (message already shown)
        import random
        tmp = 'tmp-' + `random.randrange(1000000)`
        tmp = os.path.join(dir, tmp)

        def open(path):
            # 0600: owner-only until save_set_permissions() fixes the mode
            return os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0600), 'wb')

        try:
            file = open(tmp)
        except:
            # Can't create backup... try a direct write
            tmp = None
            file = open(path)
        try:
            try:
                self.save_to_stream(file)
            finally:
                file.close()
            if tmp:
                # atomic replace of the original with the finished copy
                os.rename(tmp, path)
        except:
            _report_save_error()
            # offer to clean up the temporary file before aborting
            if tmp and os.path.exists(tmp):
                if os.path.getsize(tmp) == 0 or \
                   rox.confirm(_("Delete temporary file '%s'?") % tmp,
                               g.STOCK_DELETE):
                    os.unlink(tmp)
            raise AbortSave(None)
        self.save_set_permissions(path)
        filer.examine(path)

    def save_to_selection(self, selection_data):
        """Write data to the selection. The default method uses save_to_stream()."""
        from cStringIO import StringIO
        stream = StringIO()
        self.save_to_stream(stream)
        selection_data.set(selection_data.target, 8, stream.getvalue())

    save_mode = None  # For backwards compat

    def save_set_permissions(self, path):
        """The default save_to_file() creates files with the mode 0600
        (user read/write only). After saving has finished, it calls this
        method to set the final permissions. The save_set_permissions():
        - sets it to 0666 masked with the umask (if save_mode is None), or
        - sets it to save_last_stat.st_mode (not masked) otherwise."""
        if self.save_last_stat is not None:
            save_mode = self.save_last_stat.st_mode
        else:
            save_mode = self.save_mode
        if save_mode is not None:
            os.chmod(path, save_mode)
        else:
            mask = os.umask(0077)  # Get the current umask
            os.umask(mask)  # Set it back how it was
            os.chmod(path, 0666 & ~mask)

    def save_done(self):
        """Time to close the savebox. Default method does nothing."""
        pass

    def discard(self):
        """Discard button clicked, or document safely saved. Only called if a SaveBox
        was created with discard=1.
        The user doesn't want the document any more, even if it's modified and unsaved.
        Delete it."""
        raise Exception("Sorry... my programmer forgot to tell me how to handle Discard!")

    # Markers so the can_save_to_*() methods below can detect whether a
    # subclass overrode the default implementations.
    save_to_stream._rox_default = 1
    save_to_file._rox_default = 1
    save_to_selection._rox_default = 1

    def can_save_to_file(self):
        """Indicates whether we have a working save_to_stream or save_to_file
        method (ie, whether we can save to files). Default method checks that
        one of these two methods has been overridden."""
        if not hasattr(self.save_to_stream, '_rox_default'):
            return 1  # Have user-provided save_to_stream
        if not hasattr(self.save_to_file, '_rox_default'):
            return 1  # Have user-provided save_to_file
        return 0

    def can_save_to_selection(self):
        """Indicates whether we have a working save_to_stream or save_to_selection
        method (ie, whether we can save to selections). Default methods checks that
        one of these two methods has been overridden."""
        if not hasattr(self.save_to_stream, '_rox_default'):
            return 1  # Have user-provided save_to_stream
        if not hasattr(self.save_to_selection, '_rox_default'):
            return 1  # Have user-provided save_to_file
        return 0

    def save_cancelled(self):
        """If you multitask during a save (using a recursive mainloop) then the
        user may click on the Cancel button. This function gets called if so, and
        should cause the recursive mainloop to return."""
        raise Exception("Lazy programmer error: can't abort save!")
class SaveArea(g.VBox):
    """A SaveArea contains the widgets used in a save box. You can use
    this to put a savebox area in a larger window."""

    def __init__(self, document, uri, type):
        """'document' must be a subclass of Saveable.
        'uri' is the file's current location, or a simple name (eg 'TextFile')
        if it has never been saved.
        'type' is the MIME-type to use (eg 'text/plain').
        """
        g.VBox.__init__(self, False, 0)
        self.document = document
        self.initial_uri = uri
        # draggable icon on top, pathname entry underneath
        drag_area = self._create_drag_area(type)
        self.pack_start(drag_area, True, True, 0)
        drag_area.show_all()
        entry = g.Entry()
        entry.connect('activate', lambda w: self.save_to_file_in_entry())
        self.entry = entry
        self.pack_start(entry, False, True, 4)
        entry.show()
        entry.set_text(uri)

    def _set_icon(self, type):
        # fall back to the stock missing-image icon if no MIME icon exists
        pixbuf = image_for_type(type)
        if pixbuf:
            self.icon.set_from_pixbuf(pixbuf)
        else:
            self.icon.set_from_stock(g.STOCK_MISSING_IMAGE, g.ICON_SIZE_DND)

    def _create_drag_area(self, type):
        # centre the icon inside an EventBox so it can receive button
        # presses and act as the drag source
        align = g.Alignment()
        align.set(.5, .5, 0, 0)
        self.drag_box = g.EventBox()
        self.drag_box.set_border_width(4)
        self.drag_box.add_events(gdk.BUTTON_PRESS_MASK)
        align.add(self.drag_box)
        self.icon = g.Image()
        self._set_icon(type)
        self._set_drag_source(type)
        self.drag_box.connect('drag_begin', self.drag_begin)
        self.drag_box.connect('drag_end', self.drag_end)
        self.drag_box.connect('drag_data_get', self.drag_data_get)
        self.drag_in_progress = 0
        self.drag_box.add(self.icon)
        return align

    def set_type(self, type, icon = None):
        """Change the icon and drag target to 'type'.
        If 'icon' is given (as a GtkImage) then that icon is used,
        otherwise an appropriate icon for the type is used."""
        if icon:
            self.icon.set_from_pixbuf(icon.get_pixbuf())
        else:
            self._set_icon(type)
        self._set_drag_source(type)

    def _set_drag_source(self, type):
        # offer XDS only when the document can save to files, and raw
        # selection targets only when it can save to a selection
        if self.document.can_save_to_file():
            targets = [('XdndDirectSave0', 0, TARGET_XDS)]
        else:
            targets = []
        if self.document.can_save_to_selection():
            targets = targets + [(type, 0, TARGET_RAW),
                                 ('application/octet-stream', 0, TARGET_RAW)]
        if not targets:
            raise Exception("Document %s can't save!" % self.document)
        self.drag_box.drag_source_set(gdk.BUTTON1_MASK | gdk.BUTTON3_MASK,
                                      targets,
                                      gdk.ACTION_COPY | gdk.ACTION_MOVE)

    def save_to_file_in_entry(self):
        """Call this when the user clicks on an OK button you provide."""
        uri = self.entry.get_text()
        path = get_local_path(escape(uri))
        if path:
            if not self.confirm_new_path(path):
                return
            try:
                # desensitise while saving so it can't be retriggered
                self.set_sensitive(False)
                try:
                    self.document.save_to_file(path)
                finally:
                    self.set_sensitive(True)
                self.set_uri(path)
                self.save_done()
            except:
                _report_save_error()
        else:
            rox.info(_("Drag the icon to a directory viewer\n"
                       "(or enter a full pathname)"))

    def drag_begin(self, drag_box, context):
        # reset per-drag state
        self.drag_in_progress = 1
        self.destroy_on_drag_end = 0
        self.using_xds = 0
        self.data_sent = 0
        try:
            pixbuf = self.icon.get_pixbuf()
            if pixbuf:
                drag_box.drag_source_set_icon_pixbuf(pixbuf)
        except:
            # This can happen if we set the broken image...
            import traceback
            traceback.print_exc()
        # advertise the desired leafname to the XDS destination
        uri = self.entry.get_text()
        if uri:
            i = uri.rfind('/')
            if (i == -1):
                leaf = uri
            else:
                leaf = uri[i + 1:]
        else:
            leaf = _('Unnamed')
        _write_xds_property(context, leaf)

    def drag_data_get(self, widget, context, selection_data, info, time):
        # raw transfer: serialise the document straight into the selection
        if info == TARGET_RAW:
            try:
                self.set_sensitive(False)
                try:
                    self.document.save_to_selection(selection_data)
                finally:
                    self.set_sensitive(True)
            except:
                _report_save_error()
                _write_xds_property(context, None)
                return
            self.data_sent = 1
            _write_xds_property(context, None)
            if self.drag_in_progress:
                self.destroy_on_drag_end = 1
            else:
                self.save_done()
            return
        elif info != TARGET_XDS:
            _write_xds_property(context, None)
            alert("Bad target requested!")
            return
        # Using XDS:
        #
        # Get the path that the destination app wants us to save to.
        # If it's local, save and return Success
        # (or Error if save fails)
        # If it's remote, return Failure (remote may try another method)
        # If no URI is given, return Error
        to_send = 'E'
        uri = _read_xds_property(context, False)
        if uri:
            path = get_local_path(uri)
            if path:
                if not self.confirm_new_path(path):
                    to_send = 'E'
                else:
                    try:
                        self.set_sensitive(False)
                        try:
                            self.document.save_to_file(path)
                        finally:
                            self.set_sensitive(True)
                        self.data_sent = True
                    except:
                        _report_save_error()
                        self.data_sent = False
                    if self.data_sent:
                        to_send = 'S'
                # (else Error)
            else:
                to_send = 'F'  # Non-local transfer
        else:
            alert("Remote application wants to use " +
                  "Direct Save, but I can't read the " +
                  "XdndDirectSave0 (type text/plain) " +
                  "property.")
        selection_data.set(selection_data.target, 8, to_send)
        if to_send != 'E':
            # tidy up and record the new location
            _write_xds_property(context, None)
            path = get_local_path(uri)
            if path:
                self.set_uri(path)
            else:
                self.set_uri(uri)
        if self.data_sent:
            self.save_done()

    def confirm_new_path(self, path):
        """User wants to save to this path. If it's different to the original path,
        check that it doesn't exist and ask for confirmation if it does.
        If document.save_last_stat is set, compare with os.stat for an existing file
        and warn about changes.
        Returns true to go ahead with the save."""
        if not os.path.exists(path):
            return True
        if path == self.initial_uri:
            if self.document.save_last_stat is None:
                return True  # OK. Nothing to compare with.
            last = self.document.save_last_stat
            stat = os.stat(path)
            msg = []
            if stat.st_mode != last.st_mode:
                msg.append(_("Permissions changed from %o to %o.") % \
                           (last.st_mode, stat.st_mode))
            if stat.st_size != last.st_size:
                msg.append(_("Size was %d bytes; now %d bytes.") % \
                           (last.st_size, stat.st_size))
            if stat.st_mtime != last.st_mtime:
                msg.append(_("Modification time changed."))
            if not msg:
                return True  # No change detected
            return rox.confirm("File '%s' edited by another program since last load/save. "
                               "Really save (discarding other changes)?\n\n%s" %
                               (path, '\n'.join(msg)), g.STOCK_DELETE)
        return rox.confirm(_("File '%s' already exists -- overwrite it?") % path,
                           g.STOCK_DELETE, _('_Overwrite'))

    def set_uri(self, uri):
        """Data is safely saved somewhere. Update the document's URI and save_last_stat (for local saves).
        Internal."""
        path = get_local_path(uri)
        if path is not None:
            self.document.save_last_stat = os.stat(path)  # Record for next time
        self.document.set_uri(uri)

    def drag_end(self, widget, context):
        self.drag_in_progress = 0
        if self.destroy_on_drag_end:
            self.save_done()

    def save_done(self):
        self.document.save_done()
class SaveBox(g.Dialog):
    """A SaveBox is a GtkDialog that contains a SaveArea and, optionally, a Discard button.
    Calls rox.toplevel_(un)ref automatically.
    """

    def __init__(self, document, uri, type = 'text/plain', discard = False):
        """See SaveArea.__init__.
        If discard is True then an extra discard button is added to the dialog."""
        g.Dialog.__init__(self)
        self.set_has_separator(False)
        self.add_button(g.STOCK_CANCEL, g.RESPONSE_CANCEL)
        self.add_button(g.STOCK_SAVE, g.RESPONSE_OK)
        self.set_default_response(g.RESPONSE_OK)
        if discard:
            discard_area = g.HButtonBox()

            def discard_clicked(event):
                document.discard()
                self.destroy()

            button = rox.ButtonMixed(g.STOCK_DELETE, _('_Discard'))
            discard_area.pack_start(button, False, True, 2)
            button.connect('clicked', discard_clicked)
            button.unset_flags(g.CAN_FOCUS)
            button.set_flags(g.CAN_DEFAULT)
            self.vbox.pack_end(discard_area, False, True, 0)
            self.vbox.reorder_child(discard_area, 0)
            discard_area.show_all()
        self.set_title(_('Save As:'))
        self.set_position(g.WIN_POS_MOUSE)
        self.set_wmclass('savebox', 'Savebox')
        self.set_border_width(1)
        # Might as well make use of the new nested scopes ;-)
        self.set_save_in_progress(0)

        # SaveArea subclass that updates/closes this dialog by closing
        # over ``self`` and ``document`` from the enclosing scope.
        class BoxedArea(SaveArea):
            def set_uri(area, uri):
                SaveArea.set_uri(area, uri)
                if discard:
                    document.discard()

            def save_done(area):
                document.save_done()
                self.destroy()

            def set_sensitive(area, sensitive):
                if self.window:
                    # Might have been destroyed by now...
                    self.set_save_in_progress(not sensitive)
                SaveArea.set_sensitive(area, sensitive)

        save_area = BoxedArea(document, uri, type)
        self.save_area = save_area
        save_area.show_all()
        self.build_main_area()
        # pre-select only the leafname part of the path
        i = uri.rfind('/')
        i = i + 1
        # Have to do this here, or the selection gets messed up
        save_area.entry.grab_focus()
        g.Editable.select_region(save_area.entry, i, -1)  # PyGtk bug
        #save_area.entry.select_region(i, -1)

        def got_response(widget, response):
            # while saving, the only meaningful action is cancelling it
            if self.save_in_progress:
                try:
                    document.save_cancelled()
                except:
                    rox.report_exception()
                return
            if response == g.RESPONSE_CANCEL:
                self.destroy()
            elif response == g.RESPONSE_OK:
                self.save_area.save_to_file_in_entry()
            elif response == g.RESPONSE_DELETE_EVENT:
                pass
            else:
                raise Exception('Unknown response!')

        self.connect('response', got_response)
        rox.toplevel_ref()
        self.connect('destroy', lambda w: rox.toplevel_unref())

    def set_type(self, type, icon = None):
        """See SaveArea's method of the same name."""
        self.save_area.set_type(type, icon)

    def build_main_area(self):
        """Place self.save_area somewhere in self.vbox. Override this
        for more complicated layouts."""
        self.vbox.add(self.save_area)

    def set_save_in_progress(self, in_progress):
        """Called when saving starts and ends. Shade/unshade any widgets as
        required. Make sure you call the default method too!
        Not called if box is destroyed from a recursive mainloop inside
        a save_to_* function."""
        self.set_response_sensitive(g.RESPONSE_OK, not in_progress)
        self.save_in_progress = in_progress
class StringSaver(SaveBox, Saveable):
    """A very simple SaveBox which saves the string passed to its constructor."""

    def __init__(self, string, name):
        """'string' is the string to save. 'name' is the default filename"""
        # this object is its own document, hence passing self as Saveable
        SaveBox.__init__(self, self, name, 'text/plain')
        self.string = string

    def save_to_stream(self, stream):
        # write the stored string verbatim
        stream.write(self.string)
class SaveFilter(Saveable):
    """This Saveable runs a process in the background to generate the
    save data. Any python streams can be used as the input to and
    output from the process.
    The output from the subprocess is saved to the output stream (either
    directly, for fileno() streams, or via another temporary file).
    If the process returns a non-zero exit status or writes to stderr,
    the save fails (messages written to stderr are displayed).
    """

    # stream used as the child's stdin, or None for /dev/null
    stdin = None

    def set_stdin(self, stream):
        """Use 'stream' as stdin for the process. If stream is not a
        seekable fileno() stream then it is copied to a temporary file
        at this point. If None, the child process will get /dev/null on
        stdin."""
        if stream is not None:
            if hasattr(stream, 'fileno') and hasattr(stream, 'seek'):
                self.stdin = stream
            else:
                import tempfile
                import shutil
                # not a real seekable file: spool it to a temp file first
                self.stdin = tempfile.TemporaryFile()
                shutil.copyfileobj(stream, self.stdin)
        else:
            self.stdin = None

    def save_to_stream(self, stream):
        from processes import PipeThroughCommand
        assert not hasattr(self, 'child_run')  # No longer supported
        # NOTE(review): self.command must be set by the subclass/caller
        # before saving -- it is not defined anywhere in this class.
        self.process = PipeThroughCommand(self.command, self.stdin, stream)
        self.process.wait()
        self.process = None

    def save_cancelled(self):
        """Send SIGTERM to the child processes."""
        if self.process:
            self.killed = 1
            self.process.kill()
| {
"repo_name": "leuschel/logen",
"path": "old_logen/pylogen/rox/saving.py",
"copies": "1",
"size": "18900",
"license": "apache-2.0",
"hash": -6616033883264404000,
"line_mean": 29.8319738989,
"line_max": 100,
"alpha_frac": 0.6806349206,
"autogenerated": false,
"ratio": 3.075170842824601,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4255805763424601,
"avg_score": null,
"num_lines": null
} |
__all__ = ['_run_eidos_on_text', 'process_text_bio',
'process_json_bio', 'process_json_bio_entities',
'process_text_bio_entities', 'eidos_reader',
'initialize_reader']
import json
import logging
from indra.sources.eidos import client as eidos_client
from .bio_processor import EidosBioProcessor
logger = logging.getLogger(__name__)
try:
    # For text reading: instantiating EidosReader may start a JVM/load the
    # Eidos JAR, so failure here only disables local reading -- the web
    # service path below still works.
    from .reader import EidosReader
    eidos_reader = EidosReader()
except Exception as e:
    logger.warning('Could not instantiate Eidos reader, local reading '
                   'will not be available.')
    eidos_reader = None
def _run_eidos_on_text(text, save_json='eidos_output.json',
                       webservice=None):
    """Read the given text with Eidos and return the JSON-LD dict.

    Uses the local reader unless a `webservice` URL is given; the result
    is additionally dumped to `save_json` when that is non-empty.
    """
    if webservice:
        # strip a single trailing slash so the client builds clean URLs
        base_url = webservice[:-1] if webservice.endswith('/') else webservice
        json_dict = eidos_client.process_text(text, webservice=base_url)
    else:
        if eidos_reader is None:
            logger.error('Eidos reader is not available.')
            return None
        json_dict = eidos_reader.process_text(text)
    if save_json and json_dict:
        with open(save_json, 'wt') as fh:
            json.dump(json_dict, fh, indent=2)
    return json_dict
def process_text_bio(text, save_json='eidos_output.json', webservice=None,
                     grounder=None):
    """Read `text` with Eidos and return an EidosProcessor for it.

    The text is run through Eidos (locally via the JAR, or remotely when a
    web service URL is supplied); the resulting JSON-LD is then handed to
    process_json_bio.

    Parameters
    ----------
    text : str
        The text to be processed.
    save_json : Optional[str]
        The name of a file in which to dump the JSON output of Eidos.
    webservice : Optional[str]
        An Eidos reader web service URL to send the request to.
        If None, the reading is assumed to be done with the Eidos JAR rather
        than via a web service. Default: None
    grounder : Optional[function]
        A function which takes a text and an optional context as argument
        and returns a dict of groundings.

    Returns
    -------
    ep : EidosProcessor
        An EidosProcessor containing the extracted INDRA Statements in its
        statements attribute, or None if reading produced no output.
    """
    json_dict = _run_eidos_on_text(text, save_json, webservice)
    if not json_dict:
        return None
    return process_json_bio(json_dict, grounder=grounder)
def process_json_bio(json_dict, grounder=None):
    """Return EidosProcessor with grounded Activation/Inhibition statements.

    Parameters
    ----------
    json_dict : dict
        The JSON-LD dict to be processed.
    grounder : Optional[function]
        A function which takes a text and an optional context as argument
        and returns a dict of groundings.

    Returns
    -------
    ep : EidosProcessor
        A EidosProcessor containing the extracted INDRA Statements
        in its statements attribute.
    """
    # Use the EidosBioProcessor imported at module level; the previous
    # function-local re-import of the same class was redundant and
    # inconsistent with process_json_bio_entities below.
    ep = EidosBioProcessor(json_dict, grounder=grounder)
    ep.extract_statements()
    return ep
def process_json_bio_entities(json_dict, grounder=None):
    """Return INDRA Agents grounded to biological ontologies extracted
    from Eidos JSON-LD.

    Parameters
    ----------
    json_dict : dict
        The JSON-LD dict to be processed.
    grounder : Optional[function]
        A function which takes a text and an optional context as argument
        and returns a dict of groundings.

    Returns
    -------
    list of indra.statements.Agent
        A list of INDRA Agents which are derived from concepts extracted
        by Eidos from text.
    """
    from .bio_processor import get_agent_bio
    if not json_dict:
        return []
    ep = EidosBioProcessor(json_dict, grounder=grounder)
    ep.extract_causal_relations()
    ep.extract_events()
    # Ground each event's concept, using its evidence text as context.
    return [
        get_agent_bio(event.concept,
                      context=event.evidence[0].text,
                      grounder=grounder)
        for event in ep.get_all_events()
    ]
def process_text_bio_entities(text, webservice=None, grounder=None):
    """Return INDRA Agents grounded to biological ontologies extracted
    from text.

    Parameters
    ----------
    text : str
        Text to be processed.
    webservice : Optional[str]
        An Eidos reader web service URL to send the request to.
        If None, the reading is assumed to be done with the Eidos JAR rather
        than via a web service. Default: None
    grounder : Optional[function]
        A function which takes a text and an optional context as argument
        and returns a dict of groundings.

    Returns
    -------
    list of indra.statements.Agent
        A list of INDRA Agents which are derived from concepts extracted
        by Eidos from text.
    """
    # No JSON dump here (save_json=None); only the Agents are of interest.
    eidos_json = _run_eidos_on_text(text, save_json=None,
                                    webservice=webservice)
    return process_json_bio_entities(eidos_json, grounder=grounder)
def initialize_reader():
    """Instantiate an Eidos reader for fast subsequent reading.

    Performs one trivial (empty-string) read so the underlying reader is
    warmed up before the first real call. If the reader could not be
    instantiated at import time, a warning is logged instead of crashing
    with an AttributeError on None.
    """
    if eidos_reader is None:
        logger.warning('Eidos reader is not available, nothing to '
                       'initialize.')
        return
    eidos_reader.process_text('')
| {
"repo_name": "johnbachman/indra",
"path": "indra/sources/eidos/api.py",
"copies": "3",
"size": "5226",
"license": "bsd-2-clause",
"hash": -57365963110904650,
"line_mean": 31.6625,
"line_max": 76,
"alpha_frac": 0.6553769613,
"autogenerated": false,
"ratio": 3.9441509433962265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6099527904696227,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.