code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
import sys
from ctypes.util import find_library
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.sqlite3.base import (Database,
DatabaseWrapper as SQLiteDatabaseWrapper,
DatabaseFeatures as SQLiteDatabaseFeatures, SQLiteCursorWrapper)
from django.contrib.gis.db.backends.base import BaseSpatialFeatures
from django.contrib.gis.db.backends.spatialite.client import SpatiaLiteClient
from django.contrib.gis.db.backends.spatialite.creation import SpatiaLiteCreation
from django.contrib.gis.db.backends.spatialite.introspection import SpatiaLiteIntrospection
from django.contrib.gis.db.backends.spatialite.operations import SpatiaLiteOperations
from django.contrib.gis.db.backends.spatialite.schema import SpatialiteSchemaEditor
from django.utils import six
from django.utils.functional import cached_property
class DatabaseFeatures(BaseSpatialFeatures, SQLiteDatabaseFeatures):
    """Feature flags for the SpatiaLite (SQLite + spatial extension) backend."""

    # Distance calculations on geodetic (angular) coordinate systems are not
    # available on this backend.
    supports_distance_geodetic = False
    # SpatiaLite can only count vertices in LineStrings
    supports_num_points_poly = False

    @cached_property
    def supports_initspatialmetadata_in_one_transaction(self):
        # SpatiaLite 4.1+ support initializing all metadata in one transaction
        # which can result in a significant performance improvement when
        # creating the database.
        return self.connection.ops.spatial_version >= (4, 1, 0)
class DatabaseWrapper(SQLiteDatabaseWrapper):
    """
    SQLite database wrapper that loads the SpatiaLite C extension into each
    new connection and installs the GIS-aware helper classes (features,
    operations, client, creation, introspection).
    """
    SchemaEditorClass = SpatialiteSchemaEditor

    def __init__(self, *args, **kwargs):
        # Before we get too far, make sure pysqlite 2.5+ is installed.
        if Database.version_info < (2, 5, 0):
            raise ImproperlyConfigured('Only versions of pysqlite 2.5+ are '
                                       'compatible with SpatiaLite and GeoDjango.')

        # Trying to find the location of the SpatiaLite library.
        # Here we are figuring out the path to the SpatiaLite library
        # (`libspatialite`). If it's not in the system library path (e.g., it
        # cannot be found by `ctypes.util.find_library`), then it may be set
        # manually in the settings via the `SPATIALITE_LIBRARY_PATH` setting.
        self.spatialite_lib = getattr(settings, 'SPATIALITE_LIBRARY_PATH',
                                      find_library('spatialite'))
        if not self.spatialite_lib:
            raise ImproperlyConfigured('Unable to locate the SpatiaLite library. '
                                       'Make sure it is in your library path, or set '
                                       'SPATIALITE_LIBRARY_PATH in your settings.'
                                       )
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Swap the stock SQLite helper objects for their spatial counterparts.
        self.features = DatabaseFeatures(self)
        self.ops = SpatiaLiteOperations(self)
        self.client = SpatiaLiteClient(self)
        self.creation = SpatiaLiteCreation(self)
        self.introspection = SpatiaLiteIntrospection(self)

    def get_new_connection(self, conn_params):
        """Open a new SQLite connection and load the SpatiaLite extension into it."""
        conn = super(DatabaseWrapper, self).get_new_connection(conn_params)
        # Enabling extension loading on the SQLite connection.
        try:
            conn.enable_load_extension(True)
        except AttributeError:
            # pysqlite built without extension-loading support cannot host
            # SpatiaLite at all.
            raise ImproperlyConfigured(
                'The pysqlite library does not support C extension loading. '
                'Both SQLite and pysqlite must be configured to allow '
                'the loading of extensions to use SpatiaLite.')
        # Loading the SpatiaLite library extension on the connection, and returning
        # the created cursor.
        cur = conn.cursor(factory=SQLiteCursorWrapper)
        try:
            cur.execute("SELECT load_extension(%s)", (self.spatialite_lib,))
        except Exception as msg:
            new_msg = (
                'Unable to load the SpatiaLite library extension '
                '"%s" because: %s') % (self.spatialite_lib, msg)
            # Re-raise as ImproperlyConfigured while preserving the original
            # traceback (Python 2/3 compatible via six.reraise).
            six.reraise(ImproperlyConfigured, ImproperlyConfigured(new_msg), sys.exc_info()[2])
        cur.close()
        return conn

    def prepare_database(self):
        """Ensure the SpatiaLite spatial-metadata tables exist in the database."""
        super(DatabaseWrapper, self).prepare_database()
        # Check if spatial metadata have been initialized in the database
        with self.cursor() as cursor:
            cursor.execute("PRAGMA table_info(geometry_columns);")
            if cursor.fetchall() == []:
                # An empty PRAGMA result means geometry_columns does not exist
                # yet, so the metadata still has to be created.  Passing "1"
                # asks SpatiaLite 4.1+ to do it in a single transaction.
                arg = "1" if self.features.supports_initspatialmetadata_in_one_transaction else ""
                cursor.execute("SELECT InitSpatialMetaData(%s)" % arg)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: composer
author:
- "Dimitrios Tydeas Mengidis (@dmtrs)"
- "René Moser (@resmo)"
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
- Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on
required: false
default: install
working_dir:
description:
- Directory of your project ( see --working-dir )
required: true
default: null
aliases: [ "working-dir" ]
prefer_source:
description:
- Forces installation from package sources when possible ( see --prefer-source )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions ( see --prefer-dist )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages ( see --no-dev )
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json ( see --no-scripts )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "optimize-autoloader" ]
ignore_platform_reqs:
version_added: "2.0"
description:
- Ignore php, hhvm, lib-* and ext-* requirements and force the installation even if the local machine does not fulfill these.
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "ignore-platform-reqs" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-interaction and --no-progress if available.
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer: command=install working_dir=/path/to/project
'''
import os
import re
try:
import json
except ImportError:
import simplejson as json
def parse_out(string):
    """Collapse all runs of whitespace to single spaces and trim the ends."""
    return " ".join(string.split())
def has_changed(string):
    """Return True unless composer reported that nothing needed doing."""
    marker = "Nothing to install or update"
    return string.find(marker) == -1
def get_available_options(module, command='install'):
    """Ask composer itself which options *command* accepts.

    Runs ``composer help <command> --format=json`` and returns the
    ``definition.options`` mapping from the parsed JSON output.  Fails the
    module run if the help command exits non-zero.
    """
    rc, out, err = composer_command(module, "help %s --format=json" % command)
    if rc != 0:
        module.fail_json(msg=parse_out(err))
    return json.loads(out)['definition']['options']
def composer_command(module, command, options=None):
    """Run a composer sub-command and return ``(rc, stdout, stderr)``.

    ``php`` and ``composer`` are resolved on the PATH, with
    ``/usr/local/bin`` as an additional search location.

    :param module: AnsibleModule instance used to locate and run binaries.
    :param command: composer sub-command, e.g. ``install``.
    :param options: optional list of command-line flags to append.
    """
    # Bug fix: the default used to be a mutable ``[]``, which is shared
    # between calls in Python; use None as the sentinel instead.
    if options is None:
        options = []
    php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
    composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
    cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
    return module.run_command(cmd)
def main():
    """Ansible entry point: build a composer command line from the module
    parameters, run it, and report changed/failed status."""
    module = AnsibleModule(
        argument_spec = dict(
            command             = dict(default="install", type="str", required=False),
            working_dir         = dict(aliases=["working-dir"], required=True),
            prefer_source       = dict(default="no", type="bool", aliases=["prefer-source"]),
            prefer_dist         = dict(default="no", type="bool", aliases=["prefer-dist"]),
            no_dev              = dict(default="yes", type="bool", aliases=["no-dev"]),
            no_scripts          = dict(default="no", type="bool", aliases=["no-scripts"]),
            no_plugins          = dict(default="no", type="bool", aliases=["no-plugins"]),
            optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
            ignore_platform_reqs = dict(default="no", type="bool", aliases=["ignore-platform-reqs"]),
        ),
        supports_check_mode=True
    )

    # Get composer command with fallback to default
    command = module.params['command']

    # Ask composer which options this command supports so that only flags it
    # will accept are passed on the command line.
    available_options = get_available_options(module=module, command=command)

    options = []

    # Default options
    default_options = [
        'no-ansi',
        'no-interaction',
        'no-progress',
    ]

    for option in default_options:
        if option in available_options:
            option = "--%s" % option
            options.append(option)

    options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])

    # Map module parameter names to the composer CLI option names.
    option_params = {
        'prefer_source': 'prefer-source',
        'prefer_dist': 'prefer-dist',
        'no_dev': 'no-dev',
        'no_scripts': 'no-scripts',
        # Bug fix: this entry previously mapped to 'no_plugins' (underscore),
        # which never matched composer's dashed 'no-plugins' option in
        # available_options, so no_plugins=yes was silently ignored.
        'no_plugins': 'no-plugins',
        'optimize_autoloader': 'optimize-autoloader',
        'ignore_platform_reqs': 'ignore-platform-reqs',
    }

    for param, option in option_params.iteritems():
        if module.params.get(param) and option in available_options:
            option = "--%s" % option
            options.append(option)

    if module.check_mode:
        # --dry-run makes composer report what it would do without doing it.
        options.append('--dry-run')

    rc, out, err = composer_command(module, command, options)

    if rc != 0:
        output = parse_out(err)
        module.fail_json(msg=output)
    else:
        # Composer version > 1.0.0-alpha9 now use stderr for standard notification messages
        output = parse_out(out + err)
        module.exit_json(changed=has_changed(output), msg=output)
# import module snippets
from ansible.module_utils.basic import *
# Run the module only when the file is executed directly (Ansible invokes
# module files as scripts).
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
from . import utils
from oauthlib.common import urldecode, extract_params, safe_string_equals
from oauthlib.common import bytes_type, unicode_type
def construct_base_string(http_method, base_string_uri,
                          normalized_encoded_request_parameters):
    """**String Construction**

    Build the OAuth 1.0 signature base string per `section 3.4.1.1`_ of the
    spec: the uppercased HTTP method, the base string URI
    (Section 3.4.1.2) and the normalized request parameters
    (Section 3.4.1.3.2), each encoded per Section 3.6 and joined by "&"
    (ASCII code 38).

    .. _`section 3.4.1.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.1
    """
    return '&'.join((
        # 1. The HTTP request method in uppercase; custom methods must be
        #    encoded (Section 3.6).
        utils.escape(http_method.upper()),
        # 2. The base string URI from Section 3.4.1.2, encoded.
        utils.escape(base_string_uri),
        # 3. The request parameters as normalized in Section 3.4.1.3.2,
        #    encoded.
        utils.escape(normalized_encoded_request_parameters),
    ))
def normalize_base_string_uri(uri, host=None):
    """**Base String URI**
    Per `section 3.4.1.2`_ of the spec.

    For example, the HTTP request::

        GET /r%20v/X?id=123 HTTP/1.1
        Host: EXAMPLE.COM:80

    is represented by the base string URI: "http://example.com/r%20v/X".

    In another example, the HTTPS request::

        GET /?q=1 HTTP/1.1
        Host: www.example.net:8080

    is represented by the base string URI: "https://www.example.net:8080/".

    .. _`section 3.4.1.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.2

    The host argument overrides the netloc part of the uri argument.

    :raises ValueError: if ``uri`` is not unicode, or lacks a scheme/netloc.
    """
    if not isinstance(uri, unicode_type):
        raise ValueError('uri must be a unicode object.')

    # FIXME: urlparse does not support unicode
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)

    # The scheme, authority, and path of the request resource URI `RFC3986`
    # are included by constructing an "http" or "https" URI representing
    # the request resource (without the query or fragment) as follows:
    #
    # .. _`RFC3986`: http://tools.ietf.org/html/rfc3986
    if not scheme or not netloc:
        raise ValueError('uri must include a scheme and netloc')

    # Per `RFC 2616 section 5.1.2`_:
    #
    # Note that the absolute path cannot be empty; if none is present in
    # the original URI, it MUST be given as "/" (the server root).
    #
    # .. _`RFC 2616 section 5.1.2`: http://tools.ietf.org/html/rfc2616#section-5.1.2
    if not path:
        path = '/'

    # 1. The scheme and host MUST be in lowercase.
    scheme = scheme.lower()
    netloc = netloc.lower()

    # 2. The host and port values MUST match the content of the HTTP
    #    request "Host" header field.
    if host is not None:
        netloc = host.lower()

    # 3. The port MUST be included if it is not the default port for the
    #    scheme, and MUST be excluded if it is the default.  Specifically,
    #    the port MUST be excluded when making an HTTP request `RFC2616`_
    #    to port 80 or when making an HTTPS request `RFC2818`_ to port 443.
    #    All other non-default port numbers MUST be included.
    #
    # .. _`RFC2616`: http://tools.ietf.org/html/rfc2616
    # .. _`RFC2818`: http://tools.ietf.org/html/rfc2818
    default_ports = (
        ('http', '80'),
        ('https', '443'),
    )
    if ':' in netloc:
        # NOTE(review): this rebinds the ``host`` parameter name; harmless
        # because ``host`` is not read again afterwards, but renaming the
        # locals would make that clearer.
        host, port = netloc.split(':', 1)
        # Only a default port is stripped; non-default ports stay in netloc.
        if (scheme, port) in default_ports:
            netloc = host

    # Query and fragment are dropped: the base string URI covers only the
    # scheme, authority and path (query parameters are collected separately).
    return urlparse.urlunparse((scheme, netloc, path, params, '', ''))
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=None, headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """**Parameter Sources**

    Collect (name, value) parameter pairs from the three sources defined in
    `section 3.4.1.3.1`_ of the spec:

    1. the query component of the request URI,
    2. the OAuth "Authorization" header, excluding "realm" unless
       ``with_realm`` is true, and
    3. the request entity-body, when form-encoded.

    Parameters starting with ``oauth_`` will be unescaped.  The
    ``oauth_signature`` parameter is excluded from the result unless
    ``exclude_oauth_signature`` is set to False.

    :param uri_query: query string of the request URI.
    :param body: dict, list of 2-tuples, or form-encoded query string.
    :param headers: dict of HTTP headers (looked up case-insensitively).
    :param exclude_oauth_signature: drop ``oauth_signature`` from the result.
    :param with_realm: keep the ``realm`` parameter from the Auth header.

    .. _`section 3.4.1.3.1`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    # Bug fix: ``body`` previously defaulted to a mutable ``[]``, which is
    # shared between calls in Python; use None as the sentinel instead.
    if body is None:
        body = []
    headers = headers or {}
    params = []

    # The parameters from the following sources are collected into a single
    # list of name/value pairs:

    # *  The query component of the HTTP request URI as defined by
    #    `RFC3986, Section 3.4`_.  The query component is parsed into a list
    #    of name/value pairs by treating it as an
    #    "application/x-www-form-urlencoded" string, separating the names
    #    and values and decoding them as defined by
    #    `W3C.REC-html40-19980424`_, Section 17.13.4.
    #
    # .. _`RFC3986, Section 3.4`: http://tools.ietf.org/html/rfc3986#section-3.4
    # .. _`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
    if uri_query:
        params.extend(urldecode(uri_query))

    # *  The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
    #    present.  The header's content is parsed into a list of name/value
    #    pairs excluding the "realm" parameter if present.  The parameter
    #    values are decoded as defined by `Section 3.5.1`_.
    #
    # .. _`Section 3.5.1`: http://tools.ietf.org/html/rfc5849#section-3.5.1
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get('authorization')
        if authorization_header is not None:
            params.extend([i for i in utils.parse_authorization_header(
                authorization_header) if with_realm or i[0] != 'realm'])

    # *  The HTTP request entity-body, but only if all of the following
    #    conditions are met:
    #     *  The entity-body is single-part.
    #
    #     *  The entity-body follows the encoding requirements of the
    #        "application/x-www-form-urlencoded" content-type as defined by
    #        `W3C.REC-html40-19980424`_.
    #     *  The HTTP request entity-header includes the "Content-Type"
    #        header field set to "application/x-www-form-urlencoded".
    #
    # .._`W3C.REC-html40-19980424`: http://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424

    # TODO: enforce header param inclusion conditions
    bodyparams = extract_params(body) or []
    params.extend(bodyparams)

    # ensure all oauth params are unescaped
    unescaped_params = []
    for k, v in params:
        if k.startswith('oauth_'):
            v = utils.unescape(v)
        unescaped_params.append((k, v))

    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature',
                                       unescaped_params))

    return unescaped_params
def normalize_parameters(params):
    """**Parameters Normalization**

    Normalize collected parameters into a single string per
    `section 3.4.1.3.2`_ of the spec: each name and value is encoded
    (Section 3.6), the pairs are sorted by encoded name with ties broken by
    value, each pair is joined with "=" (even when the value is empty), and
    the pairs are concatenated with "&".

    .. _`section 3.4.1.3.2`: http://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # Encode before sorting: the ordering is defined over the
    # percent-encoded strings, not the raw ones.
    encoded = sorted((utils.escape(k), utils.escape(v)) for k, v in params)
    # "=" joins name and value even when the value is empty; "&" separates
    # the pairs.
    return '&'.join('%s=%s' % pair for pair in encoded)
def sign_hmac_sha1_with_client(base_string, client):
    """HMAC-SHA1 sign ``base_string`` using the secrets held by *client*."""
    secrets = (client.client_secret, client.resource_owner_secret)
    return sign_hmac_sha1(base_string, *secrets)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA1**

    Sign ``base_string`` with HMAC-SHA1 (`RFC2104`_) per `section 3.4.2`_
    of the spec: the key is the escaped client secret and the escaped
    resource owner secret joined by "&" (the separator is mandatory even
    when either secret is empty), and the result is the base64-encoded
    digest.

    .. _`RFC2104`: http://tools.ietf.org/html/rfc2104
    .. _`section 3.4.2`: http://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # Key: escaped client secret, "&", escaped resource owner secret
    # (Section 3.4.2); missing secrets are treated as empty strings.
    key = '&'.join((utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')))

    # hmac operates on bytes, so encode key and text first.
    signature = hmac.new(key.encode('utf-8'),
                         base_string.encode('utf-8'),
                         hashlib.sha1)

    # Base64-encode the digest (RFC2045 Section 6.8) and drop the trailing
    # newline that b2a_base64 appends.
    return binascii.b2a_base64(signature.digest())[:-1].decode('utf-8')
def sign_rsa_sha1(base_string, rsa_private_key):
    """**RSA-SHA1**

    Sign the base string with RSASSA-PKCS1-v1_5 using SHA-1 as the hash,
    per `section 3.4.3`_ of the spec (`RFC3447, Section 8.2`_, also known
    as PKCS#1).  The client MUST have established client credentials with
    the server that included its RSA public key (in a manner beyond the
    scope of the spec).

    NOTE: this implementation requires the PyCrypto library.

    .. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: http://tools.ietf.org/html/rfc3447#section-8.2
    """
    # Imported lazily so the dependency is only needed when RSA is used.
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA

    # The signing primitives expect bytes.
    if isinstance(base_string, unicode_type):
        base_string = base_string.encode('utf-8')

    signer = PKCS1_v1_5.new(RSA.importKey(rsa_private_key))
    digest = SHA.new(base_string)
    # Base64-encode and strip the trailing newline b2a_base64 appends.
    return binascii.b2a_base64(signer.sign(digest))[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
    """RSA-SHA1 sign ``base_string`` with the client's RSA private key."""
    key = client.rsa_key
    return sign_rsa_sha1(base_string, key)
def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext.

    Per `section 3.4.4`_ of the spec: the PLAINTEXT method employs no
    signature algorithm and MUST be used with a transport-layer mechanism
    such as TLS/SSL (or an equivalently protected channel).  It does not
    use the signature base string or the "oauth_timestamp" and
    "oauth_nonce" parameters.

    .. _`section 3.4.4`: http://tools.ietf.org/html/rfc5849#section-3.4.4
    """
    # The signature is simply the two escaped secrets joined by "&"; the
    # separator is mandatory even when either secret is empty
    # (Section 3.4.4 via Section 3.6 encoding).
    return '&'.join((utils.escape(client_secret or ''),
                     utils.escape(resource_owner_secret or '')))
def sign_plaintext_with_client(base_string, client):
    """PLAINTEXT signature from a client's secrets.

    ``base_string`` is unused — PLAINTEXT involves no base string — but the
    parameter keeps this wrapper's signature uniform with the other
    ``sign_*_with_client`` helpers.
    """
    return sign_plaintext(client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
                     resource_owner_secret=None):
    """Verify a HMAC-SHA1 signature.

    Per `section 3.4`_ of the spec: recompute the signature from the
    request and compare it with the transmitted one.

    .. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides.  Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
    """
    normalized = normalize_parameters(request.params)
    base_uri = normalize_base_string_uri(request.uri)
    base = construct_base_string(request.http_method, base_uri, normalized)
    expected = sign_hmac_sha1(base, client_secret, resource_owner_secret)
    # Compare via safe_string_equals rather than ``==`` (intended to be a
    # timing-safe comparison per oauthlib.common).
    return safe_string_equals(expected, request.signature)
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.

    Per `section 3.4.3`_ of the spec.

    Note this method requires the PyCrypto library.

    .. _`section 3.4.3`: http://tools.ietf.org/html/rfc5849#section-3.4.3

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides.  Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`RFC2616 section 5.2`: http://tools.ietf.org/html/rfc2616#section-5.2
    """
    # Imported here so PyCrypto is only required when RSA is used.
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA
    key = RSA.importKey(rsa_public_key)
    # Recompute the signature base string from the request contents.
    norm_params = normalize_parameters(request.params)
    uri = normalize_base_string_uri(request.uri)
    message = construct_base_string(request.http_method, uri, norm_params)
    h = SHA.new(message.encode('utf-8'))
    p = PKCS1_v1_5.new(key)
    # The transmitted signature is base64; decode it before verification.
    sig = binascii.a2b_base64(request.signature.encode('utf-8'))
    return p.verify(h, sig)
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
    """Verify a PLAINTEXT signature.

    Per `section 3.4`_ of the spec: recompute the plaintext signature from
    the secrets and compare it with the transmitted one.

    .. _`section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
    """
    expected = sign_plaintext(client_secret, resource_owner_secret)
    return safe_string_equals(expected, request.signature)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.utils import NumpyRNGContext
from astropy.visualization.interval import (ManualInterval,
MinMaxInterval,
PercentileInterval,
AsymmetricPercentileInterval,
ZScaleInterval)
class TestInterval:
    """Exercise the interval classes against a simple linear ramp."""

    # Linear ramp from -20 to +60; subclasses reuse every test with a
    # different container type for ``data``.
    data = np.linspace(-20., 60., 100)

    def test_manual(self):
        # Explicit vmin/vmax are returned unchanged.
        interval = ManualInterval(-10., +15.)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -10.)
        np.testing.assert_allclose(vmax, +15.)

    def test_manual_defaults(self):
        # When only one bound is given, the other falls back to the data
        # min/max respectively.
        interval = ManualInterval(vmin=-10.)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -10.)
        np.testing.assert_allclose(vmax, np.max(self.data))

        interval = ManualInterval(vmax=15.)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, np.min(self.data))
        np.testing.assert_allclose(vmax, 15.)

    def test_manual_zero_limit(self):
        # Regression test for a bug that caused ManualInterval to compute the
        # limit (min or max) if it was set to zero.
        interval = ManualInterval(vmin=0, vmax=0)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, 0)
        np.testing.assert_allclose(vmax, 0)

    def test_manual_defaults_with_nan(self):
        interval = ManualInterval()
        data = np.copy(self.data)
        data[0] = np.nan
        # NOTE(review): ``data`` (the NaN-laced copy) is never used --
        # get_limits is called on the pristine ``self.data`` below, so this
        # test does not actually exercise NaN handling.  Presumably it was
        # meant to pass ``data`` here (adjusting the expected limits if
        # necessary); confirm against ManualInterval's NaN behavior before
        # changing.
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -20)
        np.testing.assert_allclose(vmax, +60)

    def test_minmax(self):
        # MinMaxInterval simply reports the data extremes.
        interval = MinMaxInterval()
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -20.)
        np.testing.assert_allclose(vmax, +60.)

    def test_percentile(self):
        # Symmetric percentile: keep the central 62.2% of the values.
        interval = PercentileInterval(62.2)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -4.88)
        np.testing.assert_allclose(vmax, 44.88)

    def test_asymmetric_percentile(self):
        # Independent lower/upper percentiles.
        interval = AsymmetricPercentileInterval(10.5, 70.5)
        vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -11.6)
        np.testing.assert_allclose(vmax, 36.4)

    def test_asymmetric_percentile_nsamples(self):
        # With subsampling the result depends on the RNG, so fix the seed.
        with NumpyRNGContext(12345):
            interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20)
            vmin, vmax = interval.get_limits(self.data)
        np.testing.assert_allclose(vmin, -14.367676767676768)
        np.testing.assert_allclose(vmax, 40.266666666666666)
class TestIntervalList(TestInterval):
    # Re-run the whole TestInterval suite, feeding a plain Python list
    # instead of an ndarray.
    data = np.linspace(-20.0, 60.0, 100).tolist()
class TestInterval2D(TestInterval):
    # Re-run the whole TestInterval suite with a (100, 1) column vector to
    # check that 2-d input is handled.
    data = np.linspace(-20.0, 60.0, 100)[:, np.newaxis]
def test_zscale():
    """ZScaleInterval limits on noisy, NaN-containing, and short ramps."""
    np.random.seed(42)
    noisy = np.random.randn(100, 100) * 5 + 10
    lo, hi = ZScaleInterval().get_limits(noisy)
    np.testing.assert_allclose(lo, -9.6, atol=0.1)
    np.testing.assert_allclose(hi, 25.4, atol=0.1)

    ramp_with_nan = list(range(1000)) + [np.nan]
    lo, hi = ZScaleInterval().get_limits(ramp_with_nan)
    np.testing.assert_allclose(lo, 0, atol=0.1)
    np.testing.assert_allclose(hi, 999, atol=0.1)

    short_ramp = list(range(100))
    lo, hi = ZScaleInterval().get_limits(short_ramp)
    np.testing.assert_allclose(lo, 0, atol=0.1)
    np.testing.assert_allclose(hi, 99, atol=0.1)
def test_zscale_npoints():
    """
    Regression test to ensure ZScaleInterval returns the minimum and
    maximum of the data if the number of data points is less than
    ``min_pixels``.
    """
    tiny = np.arange(4).reshape((2, 2))
    lo, hi = ZScaleInterval(min_npixels=5).get_limits(tiny)
    assert lo == 0
    assert hi == 3
def test_integers():
    """Integer input must be promoted to float before scaling."""
    interval = MinMaxInterval()
    np.testing.assert_allclose(interval([1, 3, 4, 5, 6]),
                               [0., 0.4, 0.6, 0.8, 1.0])

    # In-place output into an integer array is rejected...
    int_out = np.zeros(5, dtype=int)
    with pytest.raises(TypeError) as exc:
        interval([1, 3, 4, 5, 6], out=int_out)
    assert exc.value.args[0] == ("Can only do in-place scaling for "
                                 "floating-point arrays")

    # ...but integer input with a floating-point output buffer is fine.
    float_out = np.zeros(5, dtype=float)
    interval([1, 3, 4, 5, 6], out=float_out)
    np.testing.assert_allclose(float_out, [0., 0.4, 0.6, 0.8, 1.0])
def test_constant_data():
    """Test intervals with constant data (avoiding divide-by-zero)."""
    shape = (10, 10)
    flat = np.ones(shape)
    interval = MinMaxInterval()
    # Degenerate limits (1, 1) must still scale without dividing by zero.
    np.testing.assert_allclose(interval.get_limits(flat), (1., 1.))
    np.testing.assert_allclose(interval(flat), np.zeros(shape))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* contrib/seg/seg--1.2--1.3.sql */

-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION seg UPDATE TO '1.3'" to load this file. \quit

--
-- Get rid of unnecessary compress and decompress support functions.
--
-- To be allowed to drop the opclass entry for a support function,
-- we must change the entry's dependency type from 'internal' to 'auto',
-- as though it were a loose member of the opfamily rather than being
-- bound into a particular opclass.  There's no SQL command for that,
-- so fake it with a manual update on pg_depend.
--
DO LANGUAGE plpgsql
$$
DECLARE
  my_schema pg_catalog.text := pg_catalog.quote_ident(pg_catalog.current_schema());
  old_path pg_catalog.text := pg_catalog.current_setting('search_path');
BEGIN
-- for safety, transiently set search_path to just pg_catalog+pg_temp
PERFORM pg_catalog.set_config('search_path', 'pg_catalog, pg_temp', true);

-- Loosen the dependency row for gseg_compress's pg_amproc entry.
UPDATE pg_catalog.pg_depend
SET deptype = 'a'
WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
    AND objid =
     (SELECT objid
      FROM pg_catalog.pg_depend
      WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
          AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass
          AND (refobjid = (my_schema || '.gseg_compress(internal)')::pg_catalog.regprocedure))
    AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass
    AND deptype = 'i';

-- Same treatment for gseg_decompress's pg_amproc entry.
UPDATE pg_catalog.pg_depend
SET deptype = 'a'
WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
    AND objid =
     (SELECT objid
      FROM pg_catalog.pg_depend
      WHERE classid = 'pg_catalog.pg_amproc'::pg_catalog.regclass
          AND refclassid = 'pg_catalog.pg_proc'::pg_catalog.regclass
          AND (refobjid = (my_schema || '.gseg_decompress(internal)')::pg_catalog.regprocedure))
    AND refclassid = 'pg_catalog.pg_opclass'::pg_catalog.regclass
    AND deptype = 'i';

PERFORM pg_catalog.set_config('search_path', old_path, true);
END
$$;

-- Now the opclass entries (GiST support slots 3 and 4) can be dropped,
-- then the functions themselves removed from the extension and dropped.
ALTER OPERATOR FAMILY gist_seg_ops USING gist drop function 3 (seg);
ALTER EXTENSION seg DROP function gseg_compress(pg_catalog.internal);
DROP function gseg_compress(pg_catalog.internal);

ALTER OPERATOR FAMILY gist_seg_ops USING gist drop function 4 (seg);
ALTER EXTENSION seg DROP function gseg_decompress(pg_catalog.internal);
DROP function gseg_decompress(pg_catalog.internal);
|
sql
|
github
|
https://github.com/postgres/postgres
|
contrib/seg/seg--1.2--1.3.sql
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementation for svn.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2004 Marcin Juszkiewicz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import os
import sys
import logging
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import runfetchcmd
from bb.fetch import logger
class Svn(Fetch):
"""Class to fetch a module or modules from svn repositories"""
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with svn.
"""
return ud.type in ['svn']
def localpath(self, url, ud, d):
if not "module" in ud.parm:
raise MissingParameterError("svn method needs a 'module' parameter")
ud.module = ud.parm["module"]
# Create paths to svn checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
ud.moddir = os.path.join(ud.pkgdir, ud.module)
if 'rev' in ud.parm:
ud.date = ""
ud.revision = ud.parm['rev']
elif 'date' in ud.date:
ud.date = ud.parm['date']
ud.revision = ""
else:
#
# ***Nasty hack***
# If DATE in unexpanded PV, use ud.date (which is set from SRCDATE)
# Should warn people to switch to SRCREV here
#
pv = data.getVar("PV", d, 0)
if "DATE" in pv:
ud.revision = ""
else:
rev = Fetch.srcrev_internal_helper(ud, d)
if rev is True:
ud.revision = self.latest_revision(url, ud, d)
ud.date = ""
elif rev:
ud.revision = rev
ud.date = ""
else:
ud.revision = ""
ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)
return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
def _buildsvncommand(self, ud, d, command):
"""
Build up an svn commandline based on ud
command is "fetch", "update", "info"
"""
basecmd = data.expand('${FETCHCMD_svn}', d)
proto = ud.parm.get('proto', 'svn')
svn_rsh = None
if proto == "svn+ssh" and "rsh" in ud.parm:
svn_rsh = ud.parm["rsh"]
svnroot = ud.host + ud.path
# either use the revision, or SRCDATE in braces,
options = []
if ud.user:
options.append("--username %s" % ud.user)
if ud.pswd:
options.append("--password %s" % ud.pswd)
if command is "info":
svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
else:
suffix = ""
if ud.revision:
options.append("-r %s" % ud.revision)
suffix = "@%s" % (ud.revision)
elif ud.date:
options.append("-r {%s}" % ud.date)
if command is "fetch":
svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
elif command is "update":
svncmd = "%s update %s" % (basecmd, " ".join(options))
else:
raise FetchError("Invalid svn command %s" % command)
if svn_rsh:
svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)
return svncmd
def go(self, loc, ud, d):
"""Fetch url"""
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
svnupdatecmd = self._buildsvncommand(ud, d, "update")
logger.info("Update " + loc)
# update sources there
os.chdir(ud.moddir)
logger.debug(1, "Running %s", svnupdatecmd)
runfetchcmd(svnupdatecmd, d)
else:
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
logger.info("Fetch " + loc)
# check out sources there
bb.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", svnfetchcmd)
runfetchcmd(svnfetchcmd, d)
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude '.svn'"
os.chdir(ud.pkgdir)
# tar them up to a defined filename
try:
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d)
except:
t, v, tb = sys.exc_info()
try:
os.unlink(ud.localpath)
except OSError:
pass
raise t, v, tb
def supports_srcrev(self):
return True
def _revision_key(self, url, ud, d):
"""
Return a unique key for the url
"""
return "svn:" + ud.moddir
def _latest_revision(self, url, ud, d):
"""
Return the latest upstream revision number
"""
logger.debug(2, "SVN fetcher hitting network for %s", url)
output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True)
revision = None
for line in output.splitlines():
if "Last Changed Rev" in line:
revision = line.split(":")[1].strip()
return revision
def _sortable_revision(self, url, ud, d):
"""
Return a sortable revision number which in our case is the revision number
"""
return self._build_revision(url, ud, d)
def _build_revision(self, url, ud, d):
return ud.revision
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_http.h>
#include <ngx_md5.h>
/* per-location configuration: either the new-style pair
 * (secure_link + secure_link_md5) or the old-style secret is used */
typedef struct {
    ngx_http_complex_value_t  *variable;   /* "secure_link" expression */
    ngx_http_complex_value_t  *md5;        /* "secure_link_md5" expression */
    ngx_str_t                  secret;     /* "secure_link_secret" (old mode) */
} ngx_http_secure_link_conf_t;


/* per-request context: raw expiration substring for $secure_link_expires */
typedef struct {
    ngx_str_t                  expires;
} ngx_http_secure_link_ctx_t;


static ngx_int_t ngx_http_secure_link_old_variable(ngx_http_request_t *r,
    ngx_http_secure_link_conf_t *conf, ngx_http_variable_value_t *v,
    uintptr_t data);
static ngx_int_t ngx_http_secure_link_expires_variable(ngx_http_request_t *r,
    ngx_http_variable_value_t *v, uintptr_t data);
static void *ngx_http_secure_link_create_conf(ngx_conf_t *cf);
static char *ngx_http_secure_link_merge_conf(ngx_conf_t *cf, void *parent,
    void *child);
static ngx_int_t ngx_http_secure_link_add_variables(ngx_conf_t *cf);


/* the three directives accepted by this module */
static ngx_command_t  ngx_http_secure_link_commands[] = {

    { ngx_string("secure_link"),
      NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
      ngx_http_set_complex_value_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_secure_link_conf_t, variable),
      NULL },

    { ngx_string("secure_link_md5"),
      NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
      ngx_http_set_complex_value_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_secure_link_conf_t, md5),
      NULL },

    { ngx_string("secure_link_secret"),
      NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1,
      ngx_conf_set_str_slot,
      NGX_HTTP_LOC_CONF_OFFSET,
      offsetof(ngx_http_secure_link_conf_t, secret),
      NULL },

      ngx_null_command
};


static ngx_http_module_t  ngx_http_secure_link_module_ctx = {
    ngx_http_secure_link_add_variables,    /* preconfiguration */
    NULL,                                  /* postconfiguration */

    NULL,                                  /* create main configuration */
    NULL,                                  /* init main configuration */

    NULL,                                  /* create server configuration */
    NULL,                                  /* merge server configuration */

    ngx_http_secure_link_create_conf,      /* create location configuration */
    ngx_http_secure_link_merge_conf        /* merge location configuration */
};


ngx_module_t  ngx_http_secure_link_module = {
    NGX_MODULE_V1,
    &ngx_http_secure_link_module_ctx,      /* module context */
    ngx_http_secure_link_commands,         /* module directives */
    NGX_HTTP_MODULE,                       /* module type */
    NULL,                                  /* init master */
    NULL,                                  /* init module */
    NULL,                                  /* init process */
    NULL,                                  /* init thread */
    NULL,                                  /* exit thread */
    NULL,                                  /* exit process */
    NULL,                                  /* exit master */
    NGX_MODULE_V1_PADDING
};


/* names of the two variables registered at preconfiguration */
static ngx_str_t  ngx_http_secure_link_name = ngx_string("secure_link");
static ngx_str_t  ngx_http_secure_link_expires_name =
    ngx_string("secure_link_expires");
/*
 * $secure_link handler (new-style mode).
 *
 * Evaluates the "secure_link" expression as "<base64url-md5>[,<expires>]",
 * recomputes MD5 over the "secure_link_md5" expression, and yields
 * "1" (valid), "0" (expired), or not_found (invalid/malformed).
 */
static ngx_int_t
ngx_http_secure_link_variable(ngx_http_request_t *r,
    ngx_http_variable_value_t *v, uintptr_t data)
{
    u_char                       *p, *last;
    ngx_str_t                     val, hash;
    time_t                        expires;
    ngx_md5_t                     md5;
    ngx_http_secure_link_ctx_t   *ctx;
    ngx_http_secure_link_conf_t  *conf;
    u_char                        hash_buf[18], md5_buf[16];

    conf = ngx_http_get_module_loc_conf(r, ngx_http_secure_link_module);

    /* old-style "secure_link_secret" mode takes precedence */
    if (conf->secret.data) {
        return ngx_http_secure_link_old_variable(r, conf, v, data);
    }

    if (conf->variable == NULL || conf->md5 == NULL) {
        goto not_found;
    }

    if (ngx_http_complex_value(r, conf->variable, &val) != NGX_OK) {
        return NGX_ERROR;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "secure link: \"%V\"", &val);

    last = val.data + val.len;

    /* split off the optional ",expires" suffix */
    p = ngx_strlchr(val.data, last, ',');
    expires = 0;

    if (p) {
        val.len = p++ - val.data;

        expires = ngx_atotm(p, last - p);
        if (expires <= 0) {
            goto not_found;
        }

        /* save the raw expiration substring for $secure_link_expires */
        ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_secure_link_ctx_t));
        if (ctx == NULL) {
            return NGX_ERROR;
        }

        ngx_http_set_ctx(r, ctx, ngx_http_secure_link_module);

        ctx->expires.len = last - p;
        ctx->expires.data = p;
    }

    /* base64url of a 16-byte MD5 digest is at most 24 characters */
    if (val.len > 24) {
        goto not_found;
    }

    hash.data = hash_buf;

    if (ngx_decode_base64url(&hash, &val) != NGX_OK) {
        goto not_found;
    }

    if (hash.len != 16) {
        goto not_found;
    }

    /* recompute MD5 over the configured "secure_link_md5" expression */
    if (ngx_http_complex_value(r, conf->md5, &val) != NGX_OK) {
        return NGX_ERROR;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "secure link md5: \"%V\"", &val);

    ngx_md5_init(&md5);
    ngx_md5_update(&md5, val.data, val.len);
    ngx_md5_final(md5_buf, &md5);

    if (ngx_memcmp(hash_buf, md5_buf, 16) != 0) {
        goto not_found;
    }

    /* "0" when the link carried an expiration that already passed */
    v->data = (u_char *) ((expires && expires < ngx_time()) ? "0" : "1");
    v->len = 1;
    v->valid = 1;
    v->no_cacheable = 0;
    v->not_found = 0;

    return NGX_OK;

not_found:

    v->not_found = 1;

    return NGX_OK;
}
/*
 * $secure_link handler (old-style "secure_link_secret" mode).
 *
 * Expects the unparsed URI to look like "/<prefix>/<32-hex-md5>/<link>":
 * skips the first path segment, takes the next segment as a 32-character
 * hex MD5, and checks it against md5(<link> + secret).  On success the
 * variable value is <link>; otherwise not_found.
 */
static ngx_int_t
ngx_http_secure_link_old_variable(ngx_http_request_t *r,
    ngx_http_secure_link_conf_t *conf, ngx_http_variable_value_t *v,
    uintptr_t data)
{
    u_char      *p, *start, *end, *last;
    size_t       len;
    ngx_int_t    n;
    ngx_uint_t   i;
    ngx_md5_t    md5;
    u_char       hash[16];

    p = &r->unparsed_uri.data[1];
    last = r->unparsed_uri.data + r->unparsed_uri.len;

    /* skip the first path segment; hash starts after its '/' */
    while (p < last) {
        if (*p++ == '/') {
            start = p;
            goto md5_start;
        }
    }

    goto not_found;

md5_start:

    /* hash segment ends at the next '/'; the rest is the protected link */
    while (p < last) {
        if (*p++ == '/') {
            end = p - 1;
            goto url_start;
        }
    }

    goto not_found;

url_start:

    len = last - p;

    /* hex MD5 must be exactly 32 chars and the link non-empty */
    if (end - start != 32 || len == 0) {
        goto not_found;
    }

    ngx_md5_init(&md5);
    ngx_md5_update(&md5, p, len);
    ngx_md5_update(&md5, conf->secret.data, conf->secret.len);
    ngx_md5_final(hash, &md5);

    /* compare the hex-encoded segment byte-pair by byte-pair */
    for (i = 0; i < 16; i++) {
        n = ngx_hextoi(&start[2 * i], 2);
        if (n == NGX_ERROR || n != hash[i]) {
            goto not_found;
        }
    }

    v->len = len;
    v->valid = 1;
    v->no_cacheable = 0;
    v->not_found = 0;
    v->data = p;

    return NGX_OK;

not_found:

    v->not_found = 1;

    return NGX_OK;
}
/*
 * $secure_link_expires: exposes the raw expiration substring stashed in
 * the request context by ngx_http_secure_link_variable(), if any.
 */
static ngx_int_t
ngx_http_secure_link_expires_variable(ngx_http_request_t *r,
    ngx_http_variable_value_t *v, uintptr_t data)
{
    ngx_http_secure_link_ctx_t  *ctx;

    ctx = ngx_http_get_module_ctx(r, ngx_http_secure_link_module);

    if (ctx == NULL) {
        v->not_found = 1;
        return NGX_OK;
    }

    v->data = ctx->expires.data;
    v->len = ctx->expires.len;
    v->valid = 1;
    v->no_cacheable = 0;
    v->not_found = 0;

    return NGX_OK;
}
/* allocate and pre-initialize the per-location configuration */
static void *
ngx_http_secure_link_create_conf(ngx_conf_t *cf)
{
    ngx_http_secure_link_conf_t  *slcf;

    slcf = ngx_pcalloc(cf->pool, sizeof(ngx_http_secure_link_conf_t));
    if (slcf == NULL) {
        return NULL;
    }

    /* slcf->secret is already zeroed by ngx_pcalloc() */

    slcf->md5 = NGX_CONF_UNSET_PTR;
    slcf->variable = NGX_CONF_UNSET_PTR;

    return slcf;
}
/*
 * Merge location configurations.  The two modes are mutually exclusive:
 * an explicit "secure_link_secret" forbids "secure_link"/"secure_link_md5"
 * at the same level, and the new-style directives, once set (here or
 * inherited), suppress inheritance of the old-style secret.
 */
static char *
ngx_http_secure_link_merge_conf(ngx_conf_t *cf, void *parent, void *child)
{
    ngx_http_secure_link_conf_t *prev = parent;
    ngx_http_secure_link_conf_t *conf = child;

    if (conf->secret.data) {
        /* resolve UNSET_PTR to NULL first, so the test below fires only
         * when secure_link/secure_link_md5 were set at this same level */
        ngx_conf_init_ptr_value(conf->variable, NULL);
        ngx_conf_init_ptr_value(conf->md5, NULL);

        if (conf->variable || conf->md5) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "\"secure_link_secret\" cannot be mixed with "
                               "\"secure_link\" and \"secure_link_md5\"");
            return NGX_CONF_ERROR;
        }

        return NGX_CONF_OK;
    }

    ngx_conf_merge_ptr_value(conf->variable, prev->variable, NULL);
    ngx_conf_merge_ptr_value(conf->md5, prev->md5, NULL);

    if (conf->variable == NULL && conf->md5 == NULL) {
        /* no new-style directive anywhere: inherit the old-style secret */
        conf->secret = prev->secret;
    }

    return NGX_CONF_OK;
}
/* register $secure_link and $secure_link_expires at preconfiguration */
static ngx_int_t
ngx_http_secure_link_add_variables(ngx_conf_t *cf)
{
    ngx_http_variable_t  *v;

    v = ngx_http_add_variable(cf, &ngx_http_secure_link_name, 0);
    if (v == NULL) {
        return NGX_ERROR;
    }

    v->get_handler = ngx_http_secure_link_variable;

    v = ngx_http_add_variable(cf, &ngx_http_secure_link_expires_name, 0);
    if (v == NULL) {
        return NGX_ERROR;
    }

    v->get_handler = ngx_http_secure_link_expires_variable;

    return NGX_OK;
}
|
c
|
github
|
https://github.com/nginx/nginx
|
src/http/modules/ngx_http_secure_link_module.c
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
urlutils.py -- helper functions for URL related problems such as
argument washing, redirection, etc.
"""
__revision__ = "$Id$"
import time
import base64
import hmac
import re
import sys
import os
import inspect
import urllib
import urllib2
from urllib import urlencode, quote_plus, quote, FancyURLopener
from six.moves.urllib.parse import urlparse, urlunparse
from cgi import parse_qs, parse_qsl, escape
from werkzeug import cached_property
from werkzeug.local import LocalProxy
try:
import BeautifulSoup
BEAUTIFUL_SOUP_IMPORTED = True
except ImportError:
BEAUTIFUL_SOUP_IMPORTED = False
from invenio.base.globals import cfg
from invenio.utils.hash import sha1, md5, HASHLIB_IMPORTED
from invenio.utils.text import wash_for_utf8
from invenio.utils import apache
def wash_url_argument(var, new_type):
    """
    Wash argument into 'new_type', that can be 'list', 'str',
    'int', 'tuple' or 'dict'.
    If needed, the check 'type(var) is not None' should be done before
    calling this function.
    @param var: variable value
    @param new_type: variable type, 'list', 'str', 'int', 'tuple' or 'dict'
    @return: as much as possible, value var as type new_type
            If var is a list, will change first element into new_type.
            If int check unsuccessful, returns 0
    An unrecognized new_type returns the empty list.
    """
    out = []
    if new_type == 'list':  # return lst
        if isinstance(var, list):
            out = var
        else:
            out = [var]
    elif new_type == 'str':  # return str
        if isinstance(var, list):
            # narrowed from a bare "except:"; only an empty list can
            # realistically fail here
            try:
                out = "%s" % var[0]
            except IndexError:
                out = ""
        elif isinstance(var, str):
            out = var
        else:
            out = "%s" % var
    elif new_type == 'int':  # return int
        if isinstance(var, list):
            # narrowed from a bare "except:" to the conversion/indexing
            # failures int(var[0]) can raise
            try:
                out = int(var[0])
            except (IndexError, TypeError, ValueError):
                out = 0
        elif isinstance(var, (int, long)):  # py2: accept long as well
            out = var
        elif isinstance(var, str):
            try:
                out = int(var)
            except (TypeError, ValueError):
                out = 0
        else:
            out = 0
    elif new_type == 'tuple':  # return tuple
        if isinstance(var, tuple):
            out = var
        else:
            out = (var, )
    elif new_type == 'dict':  # return dictionary
        if isinstance(var, dict):
            out = var
        else:
            out = {0: var}
    return out
from urlparse import urlparse, urljoin
from flask import request, url_for
def is_local_url(target):
    """Determine if URL is a local."""
    site = cfg.get('CFG_SITE_SECURE_URL')
    reference = urlparse(site)
    candidate = urlparse(urljoin(site, target))
    if candidate.scheme not in ('http', 'https'):
        return False
    return candidate.netloc == reference.netloc
def get_safe_redirect_target(arg='next'):
    """Get URL to redirect to and ensure that it is local."""
    candidates = (request.args.get(arg), request.referrer)
    for candidate in candidates:
        if candidate and is_local_url(candidate):
            return candidate
    return None
def redirect_to_url(req, url, redirection_type=None, norobot=False):
    """
    Redirect current page to url.
    @param req: request as received from apache
    @param url: url to redirect to
    @param redirection_type: what kind of redirection is required:
    e.g.: apache.HTTP_MULTIPLE_CHOICES             = 300
          apache.HTTP_MOVED_PERMANENTLY            = 301
          apache.HTTP_MOVED_TEMPORARILY            = 302
          apache.HTTP_SEE_OTHER                    = 303
          apache.HTTP_NOT_MODIFIED                 = 304
          apache.HTTP_USE_PROXY                    = 305
          apache.HTTP_TEMPORARY_REDIRECT           = 307
    The default is apache.HTTP_MOVED_TEMPORARILY
    @param norobot: wether to instruct crawlers and robots such as GoogleBot
        not to index past this point.
    @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
    """
    url = url.strip()
    if redirection_type is None:
        redirection_type = apache.HTTP_MOVED_TEMPORARILY

    # Delegate to Flask and abort via the legacy apache shim
    # (py2 two-expression raise: SERVER_RETURN carries the response).
    from flask import redirect
    r = redirect(url, code=redirection_type)
    raise apache.SERVER_RETURN, r

    # NOTE(review): everything below the raise is unreachable legacy
    # mod_python code, deliberately kept behind the FIXME until the Flask
    # port covers the cache headers, robots tag and MS-Office workaround.
    #FIXME enable code bellow
    del req.headers_out["Cache-Control"]
    req.headers_out["Cache-Control"] = "no-cache, private, no-store, " \
        "must-revalidate, post-check=0, pre-check=0, max-age=0"
    req.headers_out["Pragma"] = "no-cache"

    if norobot:
        req.headers_out["X-Robots-Tag"] = "noarchive, nosnippet, noindex, nocache"

    user_agent = req.headers_in.get('User-Agent', '')
    if 'Microsoft Office Existence Discovery' in user_agent or 'ms-office' in user_agent:
        ## HACK: this is to workaround Microsoft Office trying to be smart
        ## when users click on URLs in Office documents that require
        ## authentication. Office will check the validity of the URL
        ## but will pass the browser the redirected URL rather than
        ## the original one. This is incompatible with e.g. Shibboleth
        ## based SSO since the referer would be lost.
        ## See: http://support.microsoft.com/kb/899927
        req.status = 200
        req.content_type = 'text/html'
        if req.method != 'HEAD':
            req.write("""
<html>
<head>
<title>Intermediate page for URLs clicked on MS Office Documents</title>
<meta http-equiv="REFRESH" content="5;url=%(url)s"></meta>
</head>
<body>
<p>You are going to be redirected to the desired content within 5 seconds. If the redirection does not happen automatically please click on <a href="%(url)s">%(url_ok)s</a>.</p>
</body>
</html>""" % {
                'url': escape(req.unparsed_uri, True),
                'url_ok': escape(req.unparsed_uri)
            })
        raise apache.SERVER_RETURN(apache.DONE)

    req.headers_out["Location"] = url

    if req.response_sent_p:
        raise IOError("Cannot redirect after headers have already been sent.")

    req.status = redirection_type
    req.write('<p>Please go to <a href="%s">here</a></p>\n' % url)

    raise apache.SERVER_RETURN, apache.DONE
def rewrite_to_secure_url(url, secure_base=None):
    """
    Rewrite URL to a Secure URL

    @param url URL to be rewritten to a secure URL.
    @param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
    """
    if secure_base is None:
        secure_base = cfg.get('CFG_SITE_SECURE_URL')

    # keep path/query/fragment, swap in the secure scheme and host
    secure_scheme, secure_netloc = urlparse(secure_base)[:2]
    parts = list(urlparse(url))
    parts[0] = secure_scheme
    parts[1] = secure_netloc
    return urlunparse(parts)
def get_referer(req, replace_ampersands=False):
    """ Return the referring page of a request.
    Referer (wikipedia): Referer is a common misspelling of the word
    "referrer"; so common, in fact, that it made it into the official
    specification of HTTP. When visiting a webpage, the referer or
    referring page is the URL of the previous webpage from which a link was
    followed.
    @param req: request
    @param replace_ampersands: if 1, replace '&' by '&amp;' in the URL
        (correct HTML cannot contain '&' characters alone)
    @return: the Referer header value, '' when absent
    """
    try:
        referer = req.headers_in['Referer']
    except KeyError:
        return ''
    if replace_ampersands == 1:
        # BUG FIX: this used to be referer.replace('&', '&') -- a no-op,
        # almost certainly an HTML-entity lost in transit; escape to the
        # '&amp;' the docstring promises.
        return referer.replace('&', '&amp;')
    return referer
def drop_default_urlargd(urlargd, default_urlargd):
    """Return a copy of ``urlargd`` without the entries whose value equals
    the declared default ``(type, default)`` for that key in
    ``default_urlargd``."""
    defaults = dict(default_urlargd)

    ## Commented out. An Invenio URL now should always specify the desired
    ## language, in order not to raise the automatic language discovery
    ## (client browser language can be used now in place of CFG_SITE_LANG)
    # defaults['ln'] = (str, CFG_SITE_LANG)

    canonical = dict(urlargd)
    for key, value in urlargd.items():
        if key in defaults and defaults[key][1] == value:
            del canonical[key]
    return canonical
def make_canonical_urlargd(urlargd, default_urlargd):
    """ Build up the query part of an URL from the arguments passed in
    the 'urlargd' dictionary.  'default_urlargd' is a secondary dictionary which
    contains tuples of the form (type, default value) for the query
    arguments (this is the same dictionary as the one you can pass to
    webinterface_handler.wash_urlargd).

    When a query element has its default value, it is discarded, so
    that the simplest (canonical) url query is returned.

    The result contains the initial '?' if there are actual query
    items remaining.
    """
    remaining = drop_default_urlargd(urlargd, default_urlargd)
    if not remaining:
        return ''
    #FIXME double escaping of '&'?  .replace('&', '&amp;')
    return '?' + urlencode(remaining, doseq=True)
def create_html_link(urlbase, urlargd, link_label, linkattrd=None,
                     escape_urlargd=True, escape_linkattrd=True,
                     urlhash=None):
    """Creates a W3C compliant link.
    @param urlbase: base url (e.g. invenio.config.CFG_SITE_URL/search)
    @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'})
    @param link_label: text displayed in a browser (has to be already escaped)
    @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param escape_linkattrd: boolean indicating if the function should escape
                           attributes (e.g. < becomes &lt; or " becomes &quot;)
    @param urlhash: hash string to add at the end of the link
    """
    href = create_url(urlbase, urlargd, escape_urlargd, urlhash)
    head = '<a href="' + href + '"'
    if linkattrd:
        if escape_linkattrd:
            rendered = [escape(str(name), quote=True) + '="' +
                        escape(str(linkattrd[name]), quote=True) + '"'
                        for name in linkattrd.keys()]
        else:
            rendered = [str(name) + '="' + str(linkattrd[name]) + '"'
                        for name in linkattrd.keys()]
        head += ' ' + ' '.join(rendered)
    head = wash_for_utf8(head)
    return head + '>' + wash_for_utf8(link_label) + '</a>'
def create_html_mailto(email, subject=None, body=None, cc=None, bcc=None,
                       link_label="%(email)s", linkattrd=None, escape_urlargd=True,
                       escape_linkattrd=True, email_obfuscation_mode=None):
    """Creates a W3C compliant 'mailto' link.

    Encode/encrypt given email to reduce undesired automated email
    harvesting when embedded in a web page.

    NOTE: there is no ultimate solution to protect against email
    harvesting. All have drawbacks and can more or less be
    circumvented. There are other techniques to protect email
    addresses. We implement the less annoying one for users.

    @param email: the recipient of the email
    @param subject: a default subject for the email (must not contain
                    line feeds)
    @param body: a default body for the email
    @param cc: the co-recipient(s) of the email
    @param bcc: the hidden co-recpient(s) of the email
    @param link_label: the label of this mailto link. String
                       replacement is performed on key %(email)s with
                       the email address if needed.
    @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param escape_linkattrd: boolean indicating if the function should escape
                           attributes (e.g. < becomes &lt; or " becomes &quot;)
    @param email_obfuscation_mode: the protection mode. See below:

    You can choose among several modes to protect emails. It is
    advised to keep the default
    CFG_MISCUTIL_EMAIL_HARVESTING_PROTECTION value, so that it is
    possible for an admin to change the policy globally.

    Available modes ([t] means "transparent" for the user):

         -1: hide all emails, excepted CFG_SITE_ADMIN_EMAIL and
             CFG_SITE_SUPPORT_EMAIL.

     [t] 0 : no protection, email returned as is.
             foo@example.com => foo@example.com

         1 : basic email munging: replaces @ by [at] and . by [dot]
             foo@example.com => foo [at] example [dot] com

     [t] 2 : transparent name mangling: characters are replaced by
             equivalent HTML numeric character references.

     [t] 3 : javascript insertion. Requires Javascript enabled on client side.

         4 : replaces @ and . characters by gif equivalents.
             foo@example.com => foo<img src="at.gif" alt=" [at] ">example<img src="dot.gif" alt=" [dot] ">com
    """
    # TODO: implement other protection modes to encode/encript email:
    #
    ## [t] 5 : form submission. User is redirected to a form that he can
    ##         fills in to send the email (??Use webmessage??).
    ##         Depending on WebAccess, ask to answer a question.
    ##
    ## [t] 6 : if user can see (controlled by WebAccess), display. Else
    ##         ask to login to see email. If user cannot see, display
    ##         form submission.
    if email_obfuscation_mode is None:
        # fall back to the site-wide policy
        email_obfuscation_mode = cfg.get(
            'CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE')

    if linkattrd is None:
        linkattrd = {}

    # mailto: header fields passed as URL query arguments
    parameters = {}
    if subject:
        parameters["subject"] = subject
    if body:
        # normalize line endings to CRLF as expected in mailto bodies
        parameters["body"] = body.replace('\r\n', '\n').replace('\n', '\r\n')
    if cc:
        parameters["cc"] = cc
    if bcc:
        parameters["bcc"] = bcc

    # Preprocessing values for some modes
    if email_obfuscation_mode == 1:
        # Basic Munging
        email = email.replace("@", " [at] ").replace(".", " [dot] ")
    elif email_obfuscation_mode == 2:
        # Transparent name mangling
        email = string_to_numeric_char_reference(email)

    if '%(email)s' in link_label:
        link_label = link_label % {'email': email}

    mailto_link = create_html_link('mailto:' + email, parameters,
                                   link_label, linkattrd,
                                   escape_urlargd, escape_linkattrd)

    if email_obfuscation_mode == 0:
        # Return "as is"
        return mailto_link
    elif email_obfuscation_mode == 1:
        # Basic Munging
        return mailto_link
    elif email_obfuscation_mode == 2:
        # Transparent name mangling
        return mailto_link
    elif email_obfuscation_mode == 3:
        # Javascript-based: emit the link reversed and let the browser
        # re-reverse it at render time, defeating naive scrapers
        return '''<script language="JavaScript" ''' \
               '''type="text/javascript">''' \
               '''document.write('%s'.split("").reverse().join(""))''' \
               '''</script>''' % \
               mailto_link[::-1].replace("'", "\\'")
    elif email_obfuscation_mode == 4:
        # GIFs-based
        email = email.replace('.',
            '<img src="%s/img/dot.gif" alt=" [dot] " '
            'style="vertical-align:bottom" />' % cfg.get('CFG_SITE_URL'))
        email = email.replace('@',
            '<img src="%s/img/at.gif" alt=" [at] " '
            'style="vertical-align:baseline" />' % cfg.get('CFG_SITE_URL'))
        return email

    # All other cases, including mode -1:
    return ""
def string_to_numeric_char_reference(string):
    """
    Encode a string as HTML numeric character references.

    Eg: string_to_numeric_char_reference("abc") == '&#97;&#98;&#99;'
    """
    return "".join("&#%d;" % ord(char) for char in string)
def get_canonical_and_alternates_urls(url, drop_ln=True, washed_argd=None, quote_path=False):
    """
    Given an Invenio URL returns a tuple with two elements. The first is the
    canonical URL, that is the original URL with CFG_SITE_URL prefix, and
    where the ln= argument stripped. The second element element is mapping,
    language code -> alternate URL

    @param quote_path: if True, the path section of the given C{url}
    is quoted according to RFC 2396
    """
    _, _, path, params, query, fragment = urlparse(url)
    site_scheme, site_netloc = urlparse(cfg.get('CFG_SITE_URL'))[0:2]

    query_items = washed_argd or parse_qsl(query)
    query_items_no_ln = [(key, value) for (key, value) in query_items
                         if key != 'ln']

    if quote_path:
        path = urllib.quote(path)

    chosen_items = query_items_no_ln if drop_ln else query_items
    canonical_url = urlunparse((site_scheme, site_netloc, path, params,
                                urlencode(chosen_items), fragment))

    # one alternate URL per configured site language, ln= appended last
    alternate_urls = {}
    for ln in cfg.get('CFG_SITE_LANGS'):
        alternate_urls[ln] = urlunparse((site_scheme, site_netloc, path,
                                         params,
                                         urlencode(query_items_no_ln
                                                   + [('ln', ln)]),
                                         fragment))

    return canonical_url, alternate_urls
def create_url(urlbase, urlargd, escape_urlargd=True, urlhash=None):
    """Creates a W3C compliant URL. Output will look like this:
    'urlbase?param1=value1&param2=value2'

    @param urlbase: base url (e.g. invenio.config.CFG_SITE_URL/search)
    @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}
    @param escape_urlargd: boolean indicating if the function should
        HTML-escape the quoted keys and values
    @param urlhash: hash string to add at the end of the link
    """
    out = urlbase
    if urlargd:
        if escape_urlargd:
            pieces = [escape(quote(str(k)), quote=True) + '=' +
                      escape(quote(str(urlargd[k])), quote=True)
                      for k in urlargd.keys()]
        else:
            pieces = [str(k) + '=' + str(urlargd[k])
                      for k in urlargd.keys()]
        out += '?' + '&'.join(pieces)
    if urlhash:
        out += '#' + escape(quote(str(urlhash)))
    return out
def same_urls_p(a, b):
    """Return True if URLs a and b are equivalent, ignoring the order in
    which their query arguments appear."""
    parsed_a = list(urlparse(a))
    parsed_b = list(urlparse(b))
    # Replace the raw query string (component 4) with a parsed dict so that
    # argument order no longer matters in the comparison.
    parsed_a[4] = parse_qs(parsed_a[4])
    parsed_b[4] = parse_qs(parsed_b[4])
    return parsed_a == parsed_b
def urlargs_replace_text_in_arg(urlargs, regexp_argname, text_old, text_new):
    """Analyze `urlargs' (URL CGI GET query arguments in string form)
    and for each occurrence of argument matching `regexp_argname'
    replace every substring `text_old' by `text_new'. Return the
    resulting new URL query string.

    Used to be used for search engine's create_nearest_terms_box,
    now it is not used there anymore. It is left here in case it
    will become possibly useful later.

    @param urlargs: query string, e.g. "p=foo&of=hb"
    @param regexp_argname: regexp matched against each argument name
    @param text_old: substring to replace in matching arguments' values
    @param text_new: replacement substring
    @return: rebuilt query string (values quoted with quote_plus)
    """
    # parse URL arguments into a dictionary (one key -> list of values):
    urlargsdict = parse_qs(urlargs)
    ## construct new URL arguments:
    urlargsdictnew = {}
    for key in urlargsdict.keys():
        if re.match(regexp_argname, key):
            # replace `arg' by new values
            urlargsdictnew[key] = [parg.replace(text_old, text_new)
                                   for parg in urlargsdict[key]]
        else:
            # keep old values
            urlargsdictnew[key] = urlargsdict[key]
    # build new URL query string:
    out = ""
    for key in urlargsdictnew.keys():
        for val in urlargsdictnew[key]:
            out += "&" + key + "=" + quote_plus(val, '')
    # BUGFIX: the separator "&" is one character, so only one leading
    # character must be stripped; the old code stripped five (a leftover
    # from when the separator was the HTML-escaped "&amp;"), silently
    # eating the first four characters of the result.
    if out.startswith("&"):
        out = out[1:]
    return out
def get_title_of_page(url):
    """
    Fetch `url' and return the contents of its HTML <title> element.

    @param url: page to get the title from
    @return: the page title in utf-8, None in case any kind of
        exception occurred (e.g. connection error, URL not known), or a
        placeholder string when BeautifulSoup is not available
    """
    # Guard clause: without BeautifulSoup we cannot parse the page at all.
    if not BEAUTIFUL_SOUP_IMPORTED:
        return "Title not available"
    try:
        opener = make_invenio_opener('UrlUtils')
        soup = BeautifulSoup.BeautifulSoup(opener.open(url))
        return soup.title.string.encode("utf-8")
    except Exception:
        # BUGFIX: the old bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; the best-effort "return None on any failure"
        # behaviour is preserved, but only genuine errors are silenced.
        return None
def make_user_agent_string(component=None):
    """
    Return a nice and uniform user-agent string to be used when Invenio
    act as a client in HTTP requests.
    """
    agent = "Invenio-%s (+%s; \"%s\")" % (cfg.get('CFG_VERSION'),
                                          cfg.get('CFG_SITE_URL'),
                                          cfg.get('CFG_SITE_NAME'))
    if component:
        # Optional component tag is appended after a single space.
        agent = "%s %s" % (agent, component)
    return agent
class InvenioFancyURLopener(FancyURLopener):
    """URL opener that advertises the Invenio user-agent string by default."""

    @cached_property
    def version(self):
        """User-agent string sent along with every request."""
        return make_user_agent_string()

    def prompt_user_passwd(self, host, realm):
        """Never prompt interactively for credentials."""
        return (None, None)
# Override urllib's module-level default opener so that every request made
# through urllib carries the Invenio user-agent string (lazily constructed
# via LocalProxy so the config is read only when first used).
# See: http://docs.python.org/release/2.4.4/lib/module-urllib.html
urllib._urlopener = LocalProxy(lambda: InvenioFancyURLopener())
def make_invenio_opener(component=None):
    """
    Return an urllib2 opener with the useragent already set in the
    appropriate way.
    """
    user_agent = make_user_agent_string(component)
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', user_agent)]
    return opener
def create_AWS_request_url(base_url, argd, _amazon_secret_access_key,
                           _timestamp=None):
    """
    Create a signed AWS (Amazon Web Service) request URL corresponding
    to the given parameters.

    Example:
    >> create_AWS_request_url("http://ecs.amazon.com/onca/xml",
                              {'AWSAccessKeyID': '0000000000',
                               'Service': 'AWSECommerceService',
                               'Operation': 'ItemLookup',
                               'ItemID': '0679722769',
                               'ResponseGroup': 'ItemAttributes,Offers,Images,Review'},
                              "1234567890")

    @param base_url: Service URL of the Amazon store to query
    @param argd: dictionary of arguments defining the query
    @param _amazon_secret_access_key: your Amazon secret key
    @param _timestamp: for testing purpose only (default: current timestamp)
    @type base_url: string
    @type argd: dict
    @type _amazon_secret_access_key: string
    @type _timestamp: string
    @return signed URL of the request (string)
    """
    ## First define a few util functions

    def get_AWS_signature(argd, _amazon_secret_access_key,
                          method="GET", request_host="webservices.amazon.com",
                          request_uri="/onca/xml",
                          _timestamp=None):
        """
        Returns the signature of an Amazon request, based on the
        arguments of the request.

        @param argd: dictionary of arguments defining the query
        @param _amazon_secret_access_key: your Amazon secret key
        @param method: method of the request POST or GET
        @param request_host: host contacted for the query. To embed in the signature.
        @param request_uri: uri contacted at 'request_host'. To embed in the signature.
        @param _timestamp: for testing purpose only (default: current timestamp)
        @type argd: dict
        @type _amazon_secret_access_key: string
        @type method: string
        @type host_header: string
        @type http_request_uri: string
        @type _timestamp: string
        @return signature of the request (string)
        """
        # Add timestamp.  NOTE: this deliberately mutates the caller's
        # argd dict, so the timestamp is also part of the final URL.
        if not _timestamp:
            argd["Timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                                              time.gmtime())
        else:
            argd["Timestamp"] = _timestamp
        # Order parameter keys by byte value (Python 2: keys() returns a
        # list, which can be sorted in place)
        parameter_keys = argd.keys()
        parameter_keys.sort()
        # Encode arguments, according to RFC 3986. Make sure we
        # generate a list which is ordered by byte value of the keys
        arguments = [quote(str(key), safe="~/") + "=" + \
                     quote(str(argd[key]), safe="~/") \
                     for key in parameter_keys]
        # Join
        parameters_string = "&".join(arguments)
        # Prefix with the canonical request description (method, host,
        # URI), each on its own line, as the signing algorithm requires
        parameters_string = method.upper() + "\n" + \
                            request_host.lower() + "\n" + \
                            (request_uri or "/") + "\n" + \
                            parameters_string
        # Sign and return
        return calculate_RFC2104_HMAC(parameters_string,
                                      _amazon_secret_access_key)

    def calculate_RFC2104_HMAC(data, _amazon_secret_access_key):
        """
        Computes a RFC 2104 compliant HMAC Signature and then Base64
        encodes it.

        Module hashlib must be installed if Python < 2.5
        <http://pypi.python.org/pypi/hashlib/20081119>

        @param data: data to sign
        @param _amazon_secret_access_key: your Amazon secret key
        @type data: string
        @type _amazon_secret_access_key: string. Empty if hashlib module not installed
        """
        if not HASHLIB_IMPORTED:
            # raise-and-catch so register_exception records a full
            # traceback; the request is then emitted unsigned ("")
            try:
                raise Exception("Module hashlib not installed. Please install it.")
            except:
                from invenio.ext.logging import register_exception
                register_exception(stream='warning', alert_admin=True, subject='Cannot create AWS signature')
                return ""
        else:
            if sys.version_info < (2, 5):
                # compatibility mode for Python < 2.5 and hashlib
                my_digest_algo = _MySHA256(sha256())
            else:
                my_digest_algo = sha256
        return base64.encodestring(hmac.new(_amazon_secret_access_key,
                                            data, my_digest_algo).digest()).strip()
    ## End util functions

    parsed_url = urlparse(base_url)
    signature = get_AWS_signature(argd, _amazon_secret_access_key,
                                  request_host=parsed_url[1],
                                  request_uri=parsed_url[2],
                                  _timestamp=_timestamp)
    if signature:
        argd["Signature"] = signature
    return base_url + "?" + urlencode(argd)
def create_Indico_request_url(base_url, indico_what, indico_loc, indico_id, indico_type, indico_params, indico_key, indico_sig, _timestamp=None):
    """
    Create a signed Indico request URL to access Indico HTTP Export APIs.

    See U{http://indico.cern.ch/ihelp/html/ExportAPI/index.html} for more
    information.

    Example:
    >> create_Indico_request_url("https://indico.cern.ch",
                                 "categ",
                                 "",
                                 [1, 7],
                                 "xml",
                                 {'onlypublic': 'yes',
                                  'order': 'title',
                                  'from': 'today',
                                  'to': 'tomorrow'},
                                 '00000000-0000-0000-0000-000000000000',
                                 '00000000-0000-0000-0000-000000000000')

    @param base_url: Service base URL of the Indico instance to query
    @param indico_what: element to export
    @type indico_what: one of the strings: C{categ}, C{event}, C{room}, C{reservation}
    @param indico_loc: location of the element(s) specified by ID (only used for some elements)
    @param indico_id: ID of the element to be exported
    @type indico_id: a string or a list/tuple of strings
    @param indico_type: output format
    @type indico_type: one of the strings: C{json}, C{jsonp}, C{xml}, C{html}, C{ics}, C{atom}
    @param indico_params: parameters of the query. See U{http://indico.cern.ch/ihelp/html/ExportAPI/common.html}
    @param indico_key: API key provided for the given Indico instance
    @param indico_sig: API secret key (signature) provided for the given Indico instance
    @param _timestamp: for testing purpose only (default: current timestamp)
    @return signed URL of the request (string)
    """
    url = '/export/' + indico_what + '/'
    if indico_loc:
        url += indico_loc + '/'
    if type(indico_id) in (list, tuple):
        # dash separated list of values
        indico_id = '-'.join([str(x) for x in indico_id])
    url += indico_id + '.' + str(indico_type)

    if hasattr(indico_params, 'items'):
        items = indico_params.items()
    else:
        items = list(indico_params)
    if indico_key:
        items.append(('apikey', indico_key))
    if indico_sig and HASHLIB_IMPORTED:
        if _timestamp:
            items.append(('timestamp', str(_timestamp)))
        else:
            items.append(('timestamp', str(int(time.time()))))
        # The signature is computed over the query string sorted by
        # lowercased key, then appended as one more query argument.
        items = sorted(items, key=lambda x: x[0].lower())
        url_to_sign = '%s?%s' % (url, urlencode(items))
        if sys.version_info < (2, 5):
            # compatibility mode for Python < 2.5 and hashlib
            my_digest_algo = _MySHA1(sha1())
        else:
            my_digest_algo = sha1
        signature = hmac.new(indico_sig, url_to_sign, my_digest_algo).hexdigest()
        items.append(('signature', signature))
    elif not HASHLIB_IMPORTED:
        # raise-and-catch so register_exception records a traceback; the
        # request is then emitted unsigned
        try:
            raise Exception("Module hashlib not installed. Please install it.")
        except:
            from invenio.ext.logging import register_exception
            register_exception(stream='warning', alert_admin=True, subject='Cannot create AWS signature')
    if not items:
        return url
    url = '%s%s?%s' % (base_url.strip('/'), url, urlencode(items))
    return url
class _MyHashlibAlgo(object):
    '''
    Define a subclass of any hashlib algorithm class, with an additional "new()"
    function, to work with the Python < 2.5 version of the hmac module.

    (This class is more complex than it should, but it is not
    possible to subclass haslib algorithm)
    '''

    def __init__(self, obj):
        """Set the wrapped object."""
        # Use object.__setattr__ directly so our own __setattr__ (which
        # forwards to the wrapped object) is bypassed during setup.
        super(_MyHashlibAlgo, self).__setattr__('_obj', obj)
        methods = []
        for name_value in inspect.getmembers(obj, inspect.ismethod):
            methods.append(name_value[0])
        super(_MyHashlibAlgo, self).__setattr__('__methods__', methods)

        def isnotmethod(object_):
            "Opposite of ismethod(..)"
            return not inspect.ismethod(object_)
        members = []
        for name_value in inspect.getmembers(obj, isnotmethod):
            members.append(name_value[0])
        super(_MyHashlibAlgo, self).__setattr__('__members__', members)

    def __getattr__(self, name):
        """Redirect unhandled get attribute to self._obj."""
        if not hasattr(self._obj, name):
            # NOTE: Python 2 raise syntax; this module predates Python 3.
            raise AttributeError, ("'%s' has no attribute %s" %
                                   (self.__class__.__name__, name))
        else:
            return getattr(self._obj, name)

    def __setattr__(self, name, value):
        """Redirect set attribute to self._obj if necessary."""
        # Attributes that exist on the proxy itself (or "_obj" during
        # setup) stay on the proxy; everything else goes to the wrapped
        # hash object.
        self_has_attr = True
        try:
            super(_MyHashlibAlgo, self).__getattribute__(name)
        except AttributeError:
            self_has_attr = False
        if (name == "_obj" or not hasattr(self, "_obj") or
                not hasattr(self._obj, name) or self_has_attr):
            return super(_MyHashlibAlgo, self).__setattr__(name, value)
        else:
            return setattr(self._obj, name, value)
if HASHLIB_IMPORTED:
    from invenio.utils.hash import sha256

    class _MySHA256(_MyHashlibAlgo):
        "A _MyHashlibAlgo subclass for sha256"
        # Class attribute used as a "new()" factory by the old hmac
        # module; whatever argument it receives is deliberately ignored
        # and a fresh sha256 object is returned.
        new = lambda d = '': sha256()

    class _MySHA1(_MyHashlibAlgo):
        "A _MyHashlibAlgo subclass for sha1"
        # Same pattern as _MySHA256, for sha1.
        new = lambda d = '': sha1()
def auto_version_url(file_path):
    """ Appends a content fingerprint of the file to the request URL in
    order for the browser to refresh the cache when the file changes.

    @param file_path: path to the file, e.g js/foo.js
    @return: file_path with the file's md5 digest appended as a query
        string (empty digest if the file cannot be read)
    """
    file_md5 = ""
    try:
        # BUGFIX: use a context manager so the file handle is always
        # closed; the old code leaked the open file object.
        with open(cfg.get('CFG_WEBDIR') + os.sep + file_path) as stream:
            file_md5 = md5(stream.read()).hexdigest()
    except IOError:
        # best-effort: a missing/unreadable file just yields "?"
        pass
    return file_path + "?%s" % file_md5
def get_relative_url(url):
    """
    Returns the relative URL from a URL. For example:

    'http://web.net' -> ''
    'http://web.net/' -> ''
    'http://web.net/1222' -> '/1222'
    'http://web.net/wsadas/asd' -> '/wsadas/asd'

    It will never return a trailing "/".

    @param url: A url to transform
    @type url: str

    @return: relative URL
    """
    # drop the protocol marker, then keep everything after the host part
    hostful = url.replace("://", "")
    relative = "/" + "/".join(hostful.split("/")[1:])
    # never return a trailing slash
    return relative[:-1] if relative.endswith("/") else relative
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
import subprocess
import sys

from optparse import OptionParser
def parse_arguments(argv=None):
    """Parse command-line options for the cluster-node inventory script.

    @param argv: argument list to parse; defaults to sys.argv[1:]
        (exposed as a parameter mainly for testing)
    @return: the parsed options object
    """
    parser = OptionParser(
        prog="openstack_get_cluster_nodes",
        version="0",
        usage="%prog [options]")
    parser.add_option(
        "-k", "--key_file", type="string", default="swarm_key_1.pem",
        help="Name of ssh key file (default: %default)")
    parser.add_option(
        "-m", "--manager", type="string", default="manager",
        help="Name of the manager node (default: %default)")
    parser.add_option(
        "-n", "--name", type="string", default="swarm-1",
        help="Name of the cluster stack (default: %default)")
    parser.add_option(
        "-w", "--worker", type="string", default="worker",
        help="Name of the worker node (default: %default)")
    (opts, args) = parser.parse_args(argv)
    if len(args) != 0:
        # positional arguments are not accepted: show usage and bail out.
        # BUGFIX: sys was used here but never imported at module level.
        parser.print_help()
        sys.exit(1)
    return opts
# get the commandline arguments
opts = parse_arguments()

# Ask the OpenStack CLI for all servers whose names match the manager and
# worker patterns; "-f json" makes the output machine-parseable.
m_ips = subprocess.run("openstack server list --name {} -c Networks -f json".format(opts.manager), shell=True, stdout=subprocess.PIPE)
m_ips = json.loads(str(m_ips.stdout, encoding='utf-8'))
w_ips = subprocess.run("openstack server list --name {} -c Networks -f json".format(opts.worker), shell=True, stdout=subprocess.PIPE)
w_ips = json.loads(str(w_ips.stdout, encoding='utf-8'))

# Extract the IP address from each "Networks" value; assumes the value is
# of the form "<network-name>=<ip>" — TODO confirm against the CLI output.
manager_ips = []
for ip in m_ips:
    manager_ips.append(ip['Networks'].split('=')[1])
worker_ips = []
for ip in w_ips:
    worker_ips.append(ip['Networks'].split('=')[1])

# Emit an Ansible inventory on stdout: first the host definitions ...
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{} ansible_ssh_host={} ansible_user=ubuntu ansible_ssh_private_key_file={}".format(i, ip, opts.key_file))
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{} ansible_ssh_host={} ansible_user=ubuntu ansible_ssh_private_key_file={}".format(i, ip, opts.key_file))
print("localhost ansible_connection=local")
print()
# ... then the host groups.
print("[docker_engine]")
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{}".format(i))
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{}".format(i))
print()
print("[docker_swarm_manager]")
for i, ip in enumerate(manager_ips, start=1):
    print("manager-{}".format(i))
print()
print("[docker_swarm_worker]")
for i, ip in enumerate(worker_ips, start=1):
    print("worker-{}".format(i))
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse.sparsetools import cs_graph_components as _cs_graph_components
from scipy.sparse.csr import csr_matrix
from scipy.sparse.base import isspmatrix
# Error messages for malformed input; the %s slot in _msg1 receives the
# offending shape tuple.
_msg0 = 'x must be a symmetric square matrix!'
_msg1 = _msg0 + '(has shape %s)'
def cs_graph_components(x):
    """
    Determine connected components of a graph stored as a compressed
    sparse row or column matrix.

    For speed reasons, the symmetry of the matrix x is not checked. A
    nonzero at index `(i, j)` means that node `i` is connected to node
    `j` by an edge. The number of rows/columns of the matrix thus
    corresponds to the number of nodes in the graph.

    Parameters
    -----------
    x : array_like or sparse matrix, 2 dimensions
        The adjacency matrix of the graph. Only the upper triangular part
        is used.

    Returns
    --------
    n_comp : int
        The number of connected components.
    label : ndarray (ints, 1 dimension):
        The label array of each connected component (-2 is used to
        indicate empty rows in the matrix: 0 everywhere, including
        diagonal). This array has the length of the number of nodes,
        i.e. one label for each node of the graph. Nodes having the same
        label belong to the same connected component.

    Notes
    ------
    The matrix is assumed to be symmetric and the upper triangular part
    of the matrix is used. The matrix is converted to a CSR matrix unless
    it is already a CSR.

    Examples
    --------
    >>> from scipy.sparse.csgraph import connected_components
    >>> D = np.eye(4)
    >>> D[0,1] = D[1,0] = 1
    >>> cs_graph_components(D)
    (3, array([0, 0, 1, 2]))
    >>> from scipy.sparse import dok_matrix
    >>> cs_graph_components(dok_matrix(D))
    (3, array([0, 0, 1, 2]))
    """
    # BUGFIX: convert to CSR *before* validating the shape so that any
    # array_like input (e.g. a plain nested list, as the docstring
    # promises) is accepted — the old code required a .shape attribute
    # up front and rejected lists with a misleading error.
    if isspmatrix(x):
        x = x.tocsr()
    else:
        try:
            x = csr_matrix(x)
        except Exception:
            raise ValueError(_msg0)

    shape = x.shape
    if not ((len(shape) == 2) and (shape[0] == shape[1])):
        # BUGFIX: wrap the shape tuple so the single %s placeholder gets
        # exactly one argument; the old "_msg1 % x.shape" raised
        # TypeError instead of the intended ValueError for non-square x.
        raise ValueError(_msg1 % (shape,))

    label = np.empty((shape[0],), dtype=x.indptr.dtype)

    n_comp = _cs_graph_components(shape[0], x.indptr, x.indices, label)

    return n_comp, label
|
unknown
|
codeparrot/codeparrot-clean
| ||
@testable import Vapor
import VaporTestUtils
import XCTVapor
import enum NIOHTTP1.HTTPParserError
import XCTest
import AsyncHTTPClient
import NIOEmbedded
import NIOCore
import NIOConcurrencyHelpers
import class NIOPosix.ClientBootstrap
/// Exercises Vapor's HTTP/1 channel pipeline: streaming echo handlers,
/// response framing, error propagation, and response ordering — using both
/// NIO embedded/testing channels and real TCP connections to a running app.
final class PipelineTests: XCTestCase {
    // Fresh Application per test; created in setUp, shut down in tearDown.
    var app: Application!

    override func setUp() async throws {
        app = try await Application.make(.testing)
    }

    override func tearDown() async throws {
        try await app.asyncShutdown()
    }

    /// Streams a chunked request body back through an EmbeddedChannel and
    /// verifies the chunked framing of the response, chunk by chunk.
    func testEchoHandlers() throws {
        app.on(.POST, "echo", body: .stream) { request -> Response in
            Response(body: .init(stream: { writer in
                request.body.drain { body in
                    switch body {
                    case .buffer(let buffer):
                        return writer.write(.buffer(buffer))
                    case .error(let error):
                        return writer.write(.error(error))
                    case .end:
                        return writer.write(.end)
                    }
                }
            }))
        }

        let channel = EmbeddedChannel()
        try channel.pipeline.addVaporHTTP1Handlers(
            application: app,
            responder: app.responder,
            configuration: app.http.server.configuration
        ).wait()

        // Head + first chunk arrive together; subsequent chunks echo back
        // as three outbound writes each: size line, payload, CRLF.
        try channel.writeInbound(ByteBuffer(string: "POST /echo HTTP/1.1\r\ntransfer-encoding: chunked\r\n\r\n1\r\na\r\n"))
        let chunk = try channel.readOutbound(as: ByteBuffer.self)?.string
        XCTAssertContains(chunk, "HTTP/1.1 200 OK")
        XCTAssertContains(chunk, "connection: keep-alive")
        XCTAssertContains(chunk, "transfer-encoding: chunked")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "1\r\n")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "a")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "\r\n")
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
        try channel.writeInbound(ByteBuffer(string: "1\r\nb\r\n"))
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "1\r\n")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "b")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "\r\n")
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
        try channel.writeInbound(ByteBuffer(string: "1\r\nc\r\n"))
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "1\r\n")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "c")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "\r\n")
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
        // Terminating zero-length chunk closes the response body.
        try channel.writeInbound(ByteBuffer(string: "0\r\n\r\n"))
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "0\r\n\r\n")
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
    }

    /// Same echo scenario through the async body/stream APIs, against a
    /// real server over AsyncHTTPClient.
    func testAsyncEchoHandlers() async throws {
        app.on(.POST, "echo", body: .stream) { request async throws -> Response in
            var buffers = [ByteBuffer]()

            for try await buffer in request.body {
                buffers.append(buffer)
            }

            return Response(body: .init(managedAsyncStream: { [buffers] writer in
                for buffer in buffers {
                    try await writer.writeBuffer(buffer)
                }
            }))
        }

        app.environment.arguments = ["serve"]
        // Port 0 lets the OS pick a free port; it is read back below.
        app.http.server.configuration.port = 0
        try await app.startup()

        guard
            let localAddress = app.http.server.shared.localAddress,
            let port = localAddress.port
        else {
            XCTFail("couldn't get port from \(app.http.server.shared.localAddress.debugDescription)")
            return
        }

        let client = HTTPClient()

        let chunks = [
            "1\r\n",
            "a",
            "\r\n",
            "1\r\n",
            "b",
            "\r\n",
            "1\r\n",
            "c",
            "\r\n",
        ]

        let response = try await client.post(url: "http://localhost:\(port)/echo", body: .stream { writer in
            // Recursively write one chunk at a time so each write completes
            // before the next begins.
            let box = UnsafeMutableTransferBox(writer)
            @Sendable func write(chunks: [String]) -> EventLoopFuture<Void> {
                var chunks = chunks
                let chunk = chunks.removeFirst()
                if chunks.isEmpty {
                    return box.wrappedValue.write(.byteBuffer(ByteBuffer(string: chunk)))
                } else {
                    return box.wrappedValue.write(.byteBuffer(ByteBuffer(string: chunk))).flatMap { [chunks] in
                        return write(chunks: chunks)
                    }
                }
            }
            return write(chunks: chunks)
        }).get()

        XCTAssertEqual(response.body?.string, chunks.joined(separator: ""))
        try await client.shutdown()
    }

    /// A handler that throws mid-stream should produce a malformed response
    /// that the client detects (invalid EOF state).
    func testAsyncFailingHandlers() async throws {
        app.on(.POST, "fail", body: .stream) { request async throws -> Response in
            return Response(body: .init(managedAsyncStream: { writer in
                try await writer.writeBuffer(.init(string: "foo"))
                throw Abort(.internalServerError)
            }))
        }

        app.environment.arguments = ["serve"]
        app.http.server.configuration.port = 0
        try await app.startup()

        guard
            let localAddress = app.http.server.shared.localAddress,
            let port = localAddress.port
        else {
            XCTFail("couldn't get port from \(app.http.server.shared.localAddress.debugDescription)")
            return
        }

        let client = HTTPClient()
        do {
            _ = try await client.post(url: "http://localhost:\(port)/fail").get()
            XCTFail("Client has failed to detect broken server response")
        } catch {
            if let error = error as? HTTPParserError {
                XCTAssertEqual(error, HTTPParserError.invalidEOFState)
            } else {
                XCTFail("Caught error \"\(error)\"")
            }
        }
        try await client.shutdown()
    }

    /// A request without a body should still get a well-formed 200 response.
    func testEOFFraming() throws {
        app.on(.POST, "echo", body: .stream) { request -> Response in
            Response(body: .init(stream: { writer in
                request.body.drain { body in
                    switch body {
                    case .buffer(let buffer):
                        return writer.write(.buffer(buffer))
                    case .error(let error):
                        return writer.write(.error(error))
                    case .end:
                        return writer.write(.end)
                    }
                }
            }))
        }

        let channel = EmbeddedChannel()
        try channel.pipeline.addVaporHTTP1Handlers(
            application: app,
            responder: app.responder,
            configuration: app.http.server.configuration
        ).wait()

        try channel.writeInbound(ByteBuffer(string: "POST /echo HTTP/1.1\r\n\r\n"))
        try XCTAssertContains(channel.readOutbound(as: ByteBuffer.self)?.string, "HTTP/1.1 200 OK")
    }

    /// A stream that writes fewer bytes than its declared count must error
    /// and close the connection.
    func testBadStreamLength() throws {
        app.on(.POST, "echo", body: .stream) { request -> Response in
            // Declares count: 2 but writes only 1 byte — intentionally bad.
            Response(body: .init(stream: { writer in
                writer.write(.buffer(.init(string: "a")), promise: nil)
                writer.write(.end, promise: nil)
            }, count: 2))
        }

        let channel = EmbeddedChannel()
        try channel.connect(to: .init(unixDomainSocketPath: "/foo")).wait()
        try channel.pipeline.addVaporHTTP1Handlers(
            application: app,
            responder: app.responder,
            configuration: app.http.server.configuration
        ).wait()

        XCTAssertEqual(channel.isActive, true)
        // throws a notEnoughBytes error which is good
        XCTAssertThrowsError(try channel.writeInbound(ByteBuffer(string: "POST /echo HTTP/1.1\r\n\r\n")))
        XCTAssertEqual(channel.isActive, false)
        try XCTAssertContains(channel.readOutbound(as: ByteBuffer.self)?.string, "HTTP/1.1 200 OK")
        try XCTAssertEqual(channel.readOutbound(as: ByteBuffer.self)?.string, "a")
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
    }

    /// A request with an invalid URL must surface HTTPParserError.invalidURL
    /// and deactivate the channel.
    func testInvalidHttp() throws {
        let channel = EmbeddedChannel()
        try channel.connect(to: .init(unixDomainSocketPath: "/foo")).wait()
        try channel.pipeline.addVaporHTTP1Handlers(
            application: app,
            responder: app.responder,
            configuration: app.http.server.configuration
        ).wait()

        XCTAssertEqual(channel.isActive, true)
        // "þ" is not a valid raw URL character.
        let request = ByteBuffer(string: "POST /echo/þ HTTP/1.1\r\n\r\n")
        XCTAssertThrowsError(try channel.writeInbound(request)) { error in
            if let error = error as? HTTPParserError {
                XCTAssertEqual(error, HTTPParserError.invalidURL)
            } else {
                XCTFail("Caught error \"\(error)\"")
            }
        }
        XCTAssertEqual(channel.isActive, false)
        try XCTAssertNil(channel.readOutbound(as: ByteBuffer.self)?.string)
    }

    /// Regression test: a response future fulfilled on a *different* event
    /// loop must not crash the loop-bound response box.
    func testReturningResponseOnDifferentEventLoopDosentCrashLoopBoundBox() async throws {
        struct ResponseThing: ResponseEncodable {
            let eventLoop: EventLoop

            func encodeResponse(for request: Vapor.Request) -> NIOCore.EventLoopFuture<Vapor.Response> {
                let response = Response(status: .ok)
                return eventLoop.future(response)
            }
        }

        let eventLoop = app!.eventLoopGroup.next()
        app.get("dont-crash") { req in
            return ResponseThing(eventLoop: eventLoop)
        }

        try await app.test(.GET, "dont-crash") { res async in
            XCTAssertEqual(res.status, .ok)
        }

        app.environment.arguments = ["serve"]
        app.http.server.configuration.port = 0
        try await app.startup()

        XCTAssertNotNil(app.http.server.shared.localAddress)
        guard let localAddress = app.http.server.shared.localAddress,
              let port = localAddress.port else {
            XCTFail("couldn't get ip/port from \(app.http.server.shared.localAddress.debugDescription)")
            return
        }

        let res = try await app.client.get("http://localhost:\(port)/dont-crash")
        XCTAssertEqual(res.status, .ok)
    }

    /// Same regression, but the event-loop hop happens inside a middleware.
    func testReturningResponseFromMiddlewareOnDifferentEventLoopDosentCrashLoopBoundBox() async throws {
        struct WrongEventLoopMiddleware: Middleware {
            func respond(to request: Request, chainingTo next: Responder) -> EventLoopFuture<Response> {
                next.respond(to: request).hop(to: request.application.eventLoopGroup.next())
            }
        }

        app.grouped(WrongEventLoopMiddleware()).get("dont-crash") { req in
            return "OK"
        }

        try await app.test(.GET, "dont-crash") { res async in
            XCTAssertEqual(res.status, .ok)
        }

        app.environment.arguments = ["serve"]
        app.http.server.configuration.port = 0
        try await app.startup()

        XCTAssertNotNil(app.http.server.shared.localAddress)
        guard let localAddress = app.http.server.shared.localAddress,
              let port = localAddress.port else {
            XCTFail("couldn't get ip/port from \(app.http.server.shared.localAddress.debugDescription)")
            return
        }

        let res = try await app.client.get("http://localhost:\(port)/dont-crash")
        XCTAssertEqual(res.status, .ok)
    }

    /// Body-stream writes hopped to a foreign event loop must still complete.
    func testStreamingOffEventLoop() async throws {
        let eventLoop = app.eventLoopGroup.next()
        app.on(.POST, "stream", body: .stream) { request -> Response in
            Response(body: .init(stream: { writer in
                request.body.drain { body in
                    switch body {
                    case .buffer(let buffer):
                        return writer.write(.buffer(buffer)).hop(to: eventLoop)
                    case .error(let error):
                        return writer.write(.error(error)).hop(to: eventLoop)
                    case .end:
                        return writer.write(.end).hop(to: eventLoop)
                    }
                }
            }))
        }

        app.environment.arguments = ["serve"]
        app.http.server.configuration.port = 0
        try await app.startup()

        XCTAssertNotNil(app.http.server.shared.localAddress)
        guard let localAddress = app.http.server.shared.localAddress,
              let port = localAddress.port else {
            XCTFail("couldn't get ip/port from \(app.http.server.shared.localAddress.debugDescription)")
            return
        }

        struct ABody: Content {
            let hello: String

            init() {
                self.hello = "hello"
            }
        }

        let res = try await app.client.post("http://localhost:\(port)/stream", beforeSend: {
            try $0.content.encode(ABody())
        })
        XCTAssertEqual(res.status, .ok)
    }

    /// Pipelined requests must be answered in the order received, even when
    /// the first takes longer than the second.
    func testCorrectResponseOrder() async throws {
        app.get("sleep", ":ms") { req -> String in
            let ms = try req.parameters.require("ms", as: Int64.self)
            try await Task.sleep(nanoseconds: UInt64(ms) * 1_000_000)
            return "slept \(ms)ms"
        }

        let channel = NIOAsyncTestingChannel()
        let app = self.app!

        _ = try await (channel.eventLoop as! NIOAsyncTestingEventLoop).executeInContext {
            channel.pipeline.addVaporHTTP1Handlers(
                application: app,
                responder: app.responder,
                configuration: app.http.server.configuration
            )
        }

        try await channel.writeInbound(ByteBuffer(string: "GET /sleep/100 HTTP/1.1\r\n\r\nGET /sleep/0 HTTP/1.1\r\n\r\n"))

        // We expect 6 writes to be there - three parts (the head, body and separator for each request). However, if there are less
        // we need to have a timeout to avoid hanging the test
        let deadline = NIODeadline.now() + .seconds(5)
        var responses: [String] = []
        for _ in 0..<6 {
            guard NIODeadline.now() < deadline else {
                XCTFail("Timed out waiting for responses")
                return
            }
            let res = try await channel.waitForOutboundWrite(as: ByteBuffer.self).string
            if res.contains("slept") {
                responses.append(res)
            }
        }

        XCTAssertEqual(responses.count, 2)
        XCTAssertEqual(responses[0], "slept 100ms")
        XCTAssertEqual(responses[1], "slept 0ms")
    }

    /// Same ordering guarantee, exercised over a real TCP connection.
    func testCorrectResponseOrderOverVaporTCP() async throws {
        app.get("sleep", ":ms") { req -> String in
            let ms = try req.parameters.require("ms", as: Int64.self)
            try await Task.sleep(nanoseconds: UInt64(ms) * 1_000_000)
            return "slept \(ms)ms"
        }

        app.environment.arguments = ["serve"]
        app.http.server.configuration.port = 0
        try await app.startup()

        let channel = try await ClientBootstrap(group: app.eventLoopGroup)
            .connect(host: "127.0.0.1", port: app.http.server.configuration.port) { channel in
                channel.eventLoop.makeCompletedFuture {
                    try NIOAsyncChannel(
                        wrappingChannelSynchronously: channel,
                        configuration: NIOAsyncChannel.Configuration(
                            inboundType: ByteBuffer.self,
                            outboundType: ByteBuffer.self
                        )
                    )
                }
            }

        _ = try await channel.executeThenClose { inbound, outbound in
            try await outbound.write(ByteBuffer(string: "GET /sleep/100 HTTP/1.1\r\n\r\nGET /sleep/0 HTTP/1.1\r\n\r\n"))

            // Accumulate inbound chunks until both "slept ..." bodies arrive.
            var data = ByteBuffer()
            var sleeps = 0
            for try await chunk in inbound {
                data.writeImmutableBuffer(chunk)
                data.writeString("\r\n")
                if String(decoding: chunk.readableBytesView, as: UTF8.self).components(separatedBy: "\r\n").contains(where: { $0.hasPrefix("slept") }) {
                    sleeps += 1
                }
                if sleeps == 2 {
                    break
                }
            }

            let sleptLines = String(decoding: data.readableBytesView, as: UTF8.self).components(separatedBy: "\r\n").filter { $0.contains("slept") }
            XCTAssertEqual(sleptLines, ["slept 100ms", "slept 0ms"])
            return sleptLines
        }
    }

    override class func setUp() {
        XCTAssert(isLoggingConfigured)
    }
}
|
swift
|
github
|
https://github.com/vapor/vapor
|
Tests/VaporTests/PipelineTests.swift
|
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright 2012 Varnish Software AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Builders define actions that the Jenkins job should execute. Examples
include shell scripts or maven targets. The ``builders`` attribute in
the :ref:`Job` definition accepts a list of builders to invoke. They
may be components defined below, locally defined macros (using the top
level definition of ``builder:``), or locally defined components found
via the ``jenkins_jobs.builders`` entry point.
**Component**: builders
:Macro: builder
:Entry Point: jenkins_jobs.builders
Example::
job:
name: test_job
builders:
- shell: "make test"
"""
import logging
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import is_sequence
from jenkins_jobs.errors import InvalidAttributeError
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
from jenkins_jobs.modules.helpers import append_git_revision_config
import pkg_resources
from jenkins_jobs.modules.helpers import cloudformation_init
from jenkins_jobs.modules.helpers import cloudformation_region_dict
from jenkins_jobs.modules.helpers import cloudformation_stack
from jenkins_jobs.modules.helpers import config_file_provider_builder
from jenkins_jobs.modules.helpers import config_file_provider_settings
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
from jenkins_jobs.modules.helpers import copyartifact_build_selector
from jenkins_jobs.modules import hudson_model
from jenkins_jobs.modules.publishers import ssh
logger = logging.getLogger(__name__)
def shell(registry, xml_parent, data):
    """yaml: shell
    Execute a shell command.

    :arg str parameter: the shell command to execute

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/shell.yaml
       :language: yaml
    """
    # The plugin stores the script verbatim inside the <command> child.
    step = XML.SubElement(xml_parent, 'hudson.tasks.Shell')
    command = XML.SubElement(step, 'command')
    command.text = data
def python(registry, xml_parent, data):
    """yaml: python
    Execute a python command. Requires the Jenkins :jenkins-wiki:`Python plugin
    <Python+Plugin>`.

    :arg str parameter: the python command to execute

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/python.yaml
       :language: yaml
    """
    # Mirror the shell builder: a single element whose <command> child
    # carries the user-supplied script.
    step = XML.SubElement(xml_parent, 'hudson.plugins.python.Python')
    command = XML.SubElement(step, 'command')
    command.text = data
def copyartifact(registry, xml_parent, data):
    """yaml: copyartifact
    Copy artifact from another project. Requires the :jenkins-wiki:`Copy
    Artifact plugin <Copy+Artifact+Plugin>`.

    Please note using the multijob-build for which-build argument requires
    the :jenkins-wiki:`Multijob plugin <Multijob+Plugin>`

    :arg str project: Project to copy from
    :arg str filter: what files to copy
    :arg str target: Target base directory for copy, blank means use workspace
    :arg bool flatten: Flatten directories (default false)
    :arg bool optional: If the artifact is missing (for any reason) and
        optional is true, the build won't fail because of this builder
        (default false)
    :arg bool do-not-fingerprint: Disable automatic fingerprinting of copied
        artifacts (default false)
    :arg str which-build: which build to get artifacts from
        (optional, default last-successful)

        :which-build values:
            * **last-successful**
            * **last-completed**
            * **specific-build**
            * **last-saved**
            * **upstream-build**
            * **permalink**
            * **workspace-latest**
            * **build-param**
            * **downstream-build**
            * **multijob-build**
    :arg str build-number: specifies the build number to get when
        when specific-build is specified as which-build
    :arg str permalink: specifies the permalink to get when
        permalink is specified as which-build

        :permalink values:
            * **last**
            * **last-stable**
            * **last-successful**
            * **last-failed**
            * **last-unstable**
            * **last-unsuccessful**
    :arg bool stable: specifies to get only last stable build when
        last-successful is specified as which-build
    :arg bool fallback-to-last-successful: specifies to fallback to
        last successful build when upstream-build is specified as which-build
    :arg string param: specifies to use a build parameter to get the build when
        build-param is specified as which-build
    :arg str upstream-project-name: specifies the project name of downstream
        when downstream-build is specified as which-build
    :arg str upstream-build-number: specifies the number of the build to
        find its downstream build when downstream-build is specified as
        which-build
    :arg string parameter-filters: Filter matching jobs based on these
        parameters (optional)

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/copy-artifact001.yaml
       :language: yaml

    Multijob Example:

    .. literalinclude:: ../../tests/builders/fixtures/copy-artifact004.yaml
       :language: yaml
    """
    copy_elem = XML.SubElement(
        xml_parent, 'hudson.plugins.copyartifact.CopyArtifact')
    # Warning: this only works with copy artifact version 1.26+,
    # for copy artifact version 1.25- the 'projectName' element needs
    # to be used instead of 'project'
    mappings = [
        ('project', 'project', None),
        ('filter', 'filter', ''),
        ('target', 'target', ''),
        ('flatten', 'flatten', False),
        ('optional', 'optional', False),
        ('do-not-fingerprint', 'doNotFingerprintArtifacts', False),
        ('parameter-filters', 'parameters', ''),
    ]
    convert_mapping_to_xml(copy_elem, data, mappings, fail_required=True)
    # The which-build/permalink/etc. selection is handled by a shared helper.
    copyartifact_build_selector(copy_elem, data)
def change_assembly_version(registry, xml_parent, data):
    """yaml: change-assembly-version
    Change the assembly version.
    Requires the Jenkins :jenkins-wiki:`Change Assembly Version
    <Change+Assembly+Version>`.

    :arg str version: Set the new version number for replace (default 1.0.0)
    :arg str assemblyFile: The file name to search (default AssemblyInfo.cs)

    Minimal Example:

    .. literalinclude::
       /../../tests/builders/fixtures/changeassemblyversion-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude::
       /../../tests/builders/fixtures/changeassemblyversion-full.yaml
       :language: yaml
    """
    cav = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.changeassemblyversion.ChangeAssemblyVersion')
    # NOTE: the plugin stores the version string in an element named 'task'.
    mappings = [
        ('version', 'task', '1.0.0'),
        ('assembly-file', 'assemblyFile', 'AssemblyInfo.cs'),
    ]
    convert_mapping_to_xml(cav, data, mappings, fail_required=True)
def fingerprint(registry, xml_parent, data):
    """yaml: fingerprint
    Adds the ability to generate fingerprints as build steps instead of waiting
    for a build to complete. Requires the Jenkins :jenkins-wiki:`Fingerprint
    Plugin <Fingerprint+Plugin>`.

    :arg str targets: Files to fingerprint (default '')

    Full Example:

    .. literalinclude::
       /../../tests/builders/fixtures/fingerprint-full.yaml
       :language: yaml

    Minimal Example:

    .. literalinclude::
       /../../tests/builders/fixtures/fingerprint-minimal.yaml
       :language: yaml
    """
    fp = XML.SubElement(
        xml_parent, 'hudson.plugins.createfingerprint.CreateFingerprint')
    fp.set('plugin', 'create-fingerprint')
    convert_mapping_to_xml(
        fp, data, [('targets', 'targets', '')], fail_required=True)
def ant(registry, xml_parent, data):
    """yaml: ant
    Execute an ant target. Requires the Jenkins :jenkins-wiki:`Ant Plugin
    <Ant+Plugin>`.

    To setup this builder you can either reference the list of targets
    or use named parameters. Below is a description of both forms:

    *1) Listing targets:*

    After the ant directive, simply pass as argument a space separated list
    of targets to build.

    :Parameter: space separated list of Ant targets

    Example to call two Ant targets:

    .. literalinclude:: ../../tests/builders/fixtures/ant001.yaml
       :language: yaml

    The build file would be whatever the Jenkins Ant Plugin is set to use
    per default (i.e build.xml in the workspace root).

    *2) Using named parameters:*

    :arg str targets: the space separated list of ANT targets.
    :arg str buildfile: the path to the ANT build file.
    :arg list properties: Passed to ant script using -Dkey=value (optional)
    :arg str ant-name: the name of the ant installation,
        (default 'default') (optional)
    :arg str java-opts: java options for ant, can have multiples,
        must be in quotes (optional)

    Example specifying the build file too and several targets:

    .. literalinclude:: ../../tests/builders/fixtures/ant002.yaml
       :language: yaml
    """
    ant = XML.SubElement(xml_parent, 'hudson.tasks.Ant')
    # Support for short form: -ant: "target".  Use isinstance() rather than
    # an exact `type(data) is str` check so str subclasses (and py2 unicode
    # strings from the YAML loader) are handled too.
    if isinstance(data, str):
        data = {'targets': data}
    for setting, value in sorted(data.items()):
        # The branches are mutually exclusive, so use elif; each branch also
        # uses the already-fetched `value` instead of re-indexing `data`.
        if setting == 'targets':
            XML.SubElement(ant, 'targets').text = value
        elif setting == 'buildfile':
            XML.SubElement(ant, 'buildFile').text = value
        elif setting == 'properties':
            # Serialized as KEY=value lines; the plugin passes each one to
            # ant as -DKEY=value.
            prop_string = ''
            for prop, val in value.items():
                prop_string += "%s=%s\n" % (prop, val)
            XML.SubElement(ant, 'properties').text = prop_string
        elif setting == 'java-opts':
            XML.SubElement(ant, 'antOpts').text = ' '.join(value)
    XML.SubElement(ant, 'antName').text = data.get('ant-name', 'default')
def trigger_remote(registry, xml_parent, data):
    """yaml: trigger-remote
    Trigger build of job on remote Jenkins instance.

    :jenkins-wiki:`Parameterized Remote Trigger Plugin
    <Parameterized+Remote+Trigger+Plugin>`

    Please note that this plugin requires system configuration on the Jenkins
    Master that is unavailable from individual job views; specifically, one
    must add remote jenkins servers whose 'Display Name' field are what make up
    valid fields on the `remote-jenkins-name` attribute below.

    :arg str remote-jenkins-name: the remote Jenkins server (required)
    :arg str job: the Jenkins project to trigger on the remote Jenkins server
        (required)
    :arg bool should-not-fail-build: if true, remote job failure will not lead
        current job to fail (default false)
    :arg bool prevent-remote-build-queue: if true, wait to trigger remote
        builds until no other builds (default false)
    :arg bool block: whether to wait for the trigger jobs to finish or not
        (default true)
    :arg str poll-interval: polling interval in seconds for checking statues of
        triggered remote job, only necessary if current job is configured to
        block (default 10)
    :arg str connection-retry-limit: number of connection attempts to remote
        Jenkins server before giving up. (default 5)
    :arg str predefined-parameters: predefined parameters to send to the remote
        job when triggering it (optional)
    :arg str property-file: file in workspace of current job containing
        additional parameters to be set on remote job
        (optional)

    Example:

    .. literalinclude::
       /../../tests/builders/fixtures/trigger-remote/trigger-remote001.yaml
       :language: yaml
    """
    # Validate required attributes BEFORE touching the XML tree; the
    # original code attached elements to xml_parent first, so a bad job
    # definition raised but left a half-built RemoteBuildConfiguration
    # behind in the output document.
    for attribute in ['job', 'remote-jenkins-name']:
        if attribute not in data:
            raise MissingAttributeError(attribute, "builders.trigger-remote")
        if data[attribute] == '':
            raise InvalidAttributeError(attribute,
                                        data[attribute],
                                        "builders.trigger-remote")
    triggerr = XML.SubElement(xml_parent,
                              'org.jenkinsci.plugins.'
                              'ParameterizedRemoteTrigger.'
                              'RemoteBuildConfiguration')
    XML.SubElement(triggerr,
                   'remoteJenkinsName').text = data.get('remote-jenkins-name')
    XML.SubElement(triggerr, 'token').text = data.get('token', '')
    XML.SubElement(triggerr, 'job').text = data.get('job')
    XML.SubElement(triggerr, 'shouldNotFailBuild').text = str(
        data.get('should-not-fail-build', False)).lower()
    XML.SubElement(triggerr,
                   'pollInterval').text = str(data.get('poll-interval', 10))
    XML.SubElement(triggerr, 'connectionRetryLimit').text = str(
        data.get('connection-retry-limit', 5))
    XML.SubElement(triggerr, 'preventRemoteBuildQueue').text = str(
        data.get('prevent-remote-build-queue', False)).lower()
    XML.SubElement(triggerr, 'blockBuildUntilComplete').text = str(
        data.get('block', True)).lower()
    if 'predefined-parameters' in data:
        parameters = XML.SubElement(triggerr, 'parameters')
        parameters.text = data.get('predefined-parameters', '')
        # The plugin wants both the raw newline-separated block and a
        # per-parameter <string> list; empty lines are skipped.
        params_list = parameters.text.split("\n")
        parameter_list = XML.SubElement(triggerr, 'parameterList')
        for param in params_list:
            if param == '':
                continue
            tmp = XML.SubElement(parameter_list, 'string')
            tmp.text = param
    if 'property-file' in data and data['property-file'] != '':
        XML.SubElement(triggerr, 'loadParamsFromFile').text = 'true'
        XML.SubElement(triggerr,
                       'parameterFile').text = data.get('property-file')
    else:
        XML.SubElement(triggerr, 'loadParamsFromFile').text = 'false'
    XML.SubElement(triggerr, 'overrideAuth').text = "false"
def trigger_builds(registry, xml_parent, data):
    """yaml: trigger-builds
    Trigger builds of other jobs.
    Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
    <Parameterized+Trigger+Plugin>`.

    :arg list project: the Jenkins project to trigger
    :arg str predefined-parameters: key/value pairs to be passed to the job
        (optional)
    :arg list bool-parameters:

        :Bool:
            * **name** (`str`) -- Parameter name
            * **value** (`bool`) -- Value to set (default false)
    :arg str property-file:
        Pass properties from file to the other job (optional)
    :arg bool property-file-fail-on-missing:
        Don't trigger if any files are missing (default true)
    :arg bool current-parameters: Whether to include the parameters passed
        to the current build to the triggered job.
    :arg str node-label-name: Define a name for the NodeLabel parameter to be
        set. Used in conjunction with node-label. Requires NodeLabel Parameter
        Plugin (optional)
    :arg str node-label: Label of the nodes where build should be triggered.
        Used in conjunction with node-label-name. Requires NodeLabel Parameter
        Plugin (optional)
    :arg str restrict-matrix-project: Filter that restricts the subset
        of the combinations that the triggered job will run (optional)
    :arg bool svn-revision: Whether to pass the svn revision to the triggered
        job (optional)
    :arg dict git-revision: Passes git revision to the triggered job
        (optional).

        * **combine-queued-commits** (bool): Whether to combine queued git
          hashes or not (default false)
    :arg bool block: whether to wait for the triggered jobs to finish or not
        (default false)
    :arg dict block-thresholds: Fail builds and/or mark as failed or unstable
        based on thresholds. Only apply if block parameter is true (optional)

        :block-thresholds:
            * **build-step-failure-threshold** (`str`) - build step failure
              threshold, valid values are 'never', 'SUCCESS', 'UNSTABLE', or
              'FAILURE'. (default 'FAILURE')
            * **unstable-threshold** (`str`) - unstable threshold, valid
              values are 'never', 'SUCCESS', 'UNSTABLE', or 'FAILURE'.
              (default 'UNSTABLE')
            * **failure-threshold** (`str`) - overall failure threshold, valid
              values are 'never', 'SUCCESS', 'UNSTABLE', or 'FAILURE'.
              (default 'FAILURE')
    :arg bool same-node: Use the same node for the triggered builds that was
        used for this build (optional)
    :arg list parameter-factories: list of parameter factories

        :Factory:
            * **factory** (`str`) **filebuild** -- For every property file,
              invoke one build
            * **file-pattern** (`str`) -- File wildcard pattern
            * **no-files-found-action** (`str`) -- Action to perform when
              no files found. Valid values 'FAIL', 'SKIP', or 'NOPARMS'.
              (default 'SKIP')

        :Factory:
            * **factory** (`str`) **binaryfile** -- For every matching
              file, invoke one build
            * **file-pattern** (`str`) -- Artifact ID of the artifact
            * **no-files-found-action** (`str`) -- Action to perform when
              no files found. Valid values 'FAIL', 'SKIP', or 'NOPARMS'.
              (default 'SKIP')

        :Factory:
            * **factory** (`str`) **counterbuild** -- Invoke i=0...N builds
            * **from** (`int`) -- Artifact ID of the artifact
            * **to** (`int`) -- Version of the artifact
            * **step** (`int`) -- Classifier of the artifact
            * **parameters** (`str`) -- KEY=value pairs, one per line
              (default '')
            * **validation-fail** (`str`) -- Action to perform when
              stepping validation fails. Valid values 'FAIL', 'SKIP', or
              'NOPARMS'. (default 'FAIL')

        :Factory:
            * **factory** (`str`) **allnodesforlabel** -- Trigger a build
              on all nodes having specific label. Requires NodeLabel
              Parameter Plugin (optional)
            * **name** (`str`) -- Name of the parameter to set (optional)
            * **node-label** (`str`) -- Label of the nodes where build
              should be triggered
            * **ignore-offline-nodes** (`bool`) -- Don't trigger build on
              offline nodes (default true)

        :Factory:
            * **factory** (`str`) **allonlinenodes** -- Trigger a build on
              every online node. Requires NodeLabel Parameter Plugin (optional)

    Examples:

    Basic usage with yaml list of projects.

    .. literalinclude::
       /../../tests/builders/fixtures/trigger-builds/project-list.yaml
       :language: yaml

    Basic usage with passing svn revision through.

    .. literalinclude:: /../../tests/builders/fixtures/trigger-builds001.yaml
       :language: yaml

    Basic usage with passing git revision through.

    .. literalinclude:: /../../tests/builders/fixtures/trigger-builds006.yaml
       :language: yaml

    Example with all supported parameter factories.

    .. literalinclude::
       /../../tests/builders/fixtures/trigger-builds-configfactory-multi.yaml
       :language: yaml
    """
    tbuilder = XML.SubElement(xml_parent,
                              'hudson.plugins.parameterizedtrigger.'
                              'TriggerBuilder')
    configs = XML.SubElement(tbuilder, 'configs')
    for project_def in data:
        if 'project' not in project_def or project_def['project'] == '':
            logger.debug("No project specified - skipping trigger-build")
            continue
        tconfig = XML.SubElement(configs,
                                 'hudson.plugins.parameterizedtrigger.'
                                 'BlockableBuildTriggerConfig')
        tconfigs = XML.SubElement(tconfig, 'configs')
        if project_def.get('current-parameters'):
            XML.SubElement(tconfigs,
                           'hudson.plugins.parameterizedtrigger.'
                           'CurrentBuildParameters')
        if project_def.get('svn-revision'):
            XML.SubElement(tconfigs,
                           'hudson.plugins.parameterizedtrigger.'
                           'SubversionRevisionBuildParameters')
        if project_def.get('git-revision'):
            append_git_revision_config(tconfigs, project_def['git-revision'])
        if project_def.get('same-node'):
            XML.SubElement(tconfigs,
                           'hudson.plugins.parameterizedtrigger.'
                           'NodeParameters')
        if 'property-file' in project_def:
            params = XML.SubElement(tconfigs,
                                    'hudson.plugins.parameterizedtrigger.'
                                    'FileBuildParameters')
            propertiesFile = XML.SubElement(params, 'propertiesFile')
            propertiesFile.text = project_def['property-file']
            failTriggerOnMissing = XML.SubElement(params,
                                                  'failTriggerOnMissing')
            failTriggerOnMissing.text = str(project_def.get(
                'property-file-fail-on-missing', True)).lower()
        if 'predefined-parameters' in project_def:
            params = XML.SubElement(tconfigs,
                                    'hudson.plugins.parameterizedtrigger.'
                                    'PredefinedBuildParameters')
            properties = XML.SubElement(params, 'properties')
            properties.text = project_def['predefined-parameters']
        if 'bool-parameters' in project_def:
            params = XML.SubElement(tconfigs,
                                    'hudson.plugins.parameterizedtrigger.'
                                    'BooleanParameters')
            # BUGFIX: this inner <configs> element previously rebound the
            # outer 'configs' variable (the TriggerBuilder's top-level
            # element), which made the empty-tree pruning check at the end
            # of this function inspect the wrong element and could wrongly
            # remove the whole builder.
            bool_configs = XML.SubElement(params, 'configs')
            for bool_param in project_def['bool-parameters']:
                param = XML.SubElement(bool_configs,
                                       'hudson.plugins.parameterizedtrigger.'
                                       'BooleanParameterConfig')
                XML.SubElement(param, 'name').text = str(bool_param['name'])
                XML.SubElement(param, 'value').text = str(
                    bool_param.get('value', False)).lower()
        if 'node-label-name' in project_def and 'node-label' in project_def:
            node = XML.SubElement(tconfigs, 'org.jvnet.jenkins.plugins.'
                                  'nodelabelparameter.parameterizedtrigger.'
                                  'NodeLabelBuildParameter')
            XML.SubElement(node, 'name').text = project_def['node-label-name']
            XML.SubElement(node, 'nodeLabel').text = project_def['node-label']
        if 'restrict-matrix-project' in project_def:
            params = XML.SubElement(tconfigs,
                                    'hudson.plugins.parameterizedtrigger.'
                                    'matrix.MatrixSubsetBuildParameters')
            XML.SubElement(params, 'filter').text = project_def[
                'restrict-matrix-project']
        if len(list(tconfigs)) == 0:
            # No parameter blocks were emitted for this project.
            tconfigs.set('class', 'java.util.Collections$EmptyList')
        if 'parameter-factories' in project_def:
            fconfigs = XML.SubElement(tconfig, 'configFactories')
            supported_factories = ['filebuild',
                                   'binaryfile',
                                   'counterbuild',
                                   'allnodesforlabel',
                                   'allonlinenodes']
            supported_actions = ['SKIP', 'NOPARMS', 'FAIL']
            for factory in project_def['parameter-factories']:
                if factory['factory'] not in supported_factories:
                    raise InvalidAttributeError('factory',
                                                factory['factory'],
                                                supported_factories)
                if factory['factory'] == 'filebuild':
                    params = XML.SubElement(
                        fconfigs,
                        'hudson.plugins.parameterizedtrigger.'
                        'FileBuildParameterFactory')
                if factory['factory'] == 'binaryfile':
                    params = XML.SubElement(
                        fconfigs,
                        'hudson.plugins.parameterizedtrigger.'
                        'BinaryFileParameterFactory')
                    parameterName = XML.SubElement(params, 'parameterName')
                    parameterName.text = factory['parameter-name']
                # filebuild and binaryfile share the file-pattern handling.
                if (factory['factory'] == 'filebuild' or
                        factory['factory'] == 'binaryfile'):
                    filePattern = XML.SubElement(params, 'filePattern')
                    filePattern.text = factory['file-pattern']
                    noFilesFoundAction = XML.SubElement(
                        params,
                        'noFilesFoundAction')
                    noFilesFoundActionValue = str(factory.get(
                        'no-files-found-action', 'SKIP'))
                    if noFilesFoundActionValue not in supported_actions:
                        raise InvalidAttributeError('no-files-found-action',
                                                    noFilesFoundActionValue,
                                                    supported_actions)
                    noFilesFoundAction.text = noFilesFoundActionValue
                if factory['factory'] == 'counterbuild':
                    params = XML.SubElement(
                        fconfigs,
                        'hudson.plugins.parameterizedtrigger.'
                        'CounterBuildParameterFactory')
                    fromProperty = XML.SubElement(params, 'from')
                    fromProperty.text = str(factory['from'])
                    toProperty = XML.SubElement(params, 'to')
                    toProperty.text = str(factory['to'])
                    stepProperty = XML.SubElement(params, 'step')
                    stepProperty.text = str(factory['step'])
                    paramExpr = XML.SubElement(params, 'paramExpr')
                    paramExpr.text = str(factory.get(
                        'parameters', ''))
                    validationFail = XML.SubElement(params, 'validationFail')
                    validationFailValue = str(factory.get(
                        'validation-fail', 'FAIL'))
                    if validationFailValue not in supported_actions:
                        raise InvalidAttributeError('validation-fail',
                                                    validationFailValue,
                                                    supported_actions)
                    validationFail.text = validationFailValue
                if factory['factory'] == 'allnodesforlabel':
                    params = XML.SubElement(
                        fconfigs,
                        'org.jvnet.jenkins.plugins.nodelabelparameter.'
                        'parameterizedtrigger.'
                        'AllNodesForLabelBuildParameterFactory')
                    nameProperty = XML.SubElement(params, 'name')
                    nameProperty.text = str(factory.get(
                        'name', ''))
                    nodeLabel = XML.SubElement(params, 'nodeLabel')
                    nodeLabel.text = str(factory['node-label'])
                    ignoreOfflineNodes = XML.SubElement(
                        params,
                        'ignoreOfflineNodes')
                    ignoreOfflineNodes.text = str(factory.get(
                        'ignore-offline-nodes', True)).lower()
                if factory['factory'] == 'allonlinenodes':
                    params = XML.SubElement(
                        fconfigs,
                        'org.jvnet.jenkins.plugins.nodelabelparameter.'
                        'parameterizedtrigger.'
                        'AllNodesBuildParameterFactory')
        projects = XML.SubElement(tconfig, 'projects')
        if isinstance(project_def['project'], list):
            projects.text = ",".join(project_def['project'])
        else:
            projects.text = project_def['project']
        condition = XML.SubElement(tconfig, 'condition')
        condition.text = 'ALWAYS'
        trigger_with_no_params = XML.SubElement(tconfig,
                                                'triggerWithNoParameters')
        trigger_with_no_params.text = 'false'
        build_all_nodes_with_label = XML.SubElement(tconfig,
                                                    'buildAllNodesWithLabel')
        build_all_nodes_with_label.text = 'false'
        block = project_def.get('block', False)
        if block:
            block = XML.SubElement(tconfig, 'block')
            supported_thresholds = [['build-step-failure-threshold',
                                     'buildStepFailureThreshold',
                                     'FAILURE'],
                                    ['unstable-threshold',
                                     'unstableThreshold',
                                     'UNSTABLE'],
                                    ['failure-threshold',
                                     'failureThreshold',
                                     'FAILURE']]
            supported_threshold_values = ['never',
                                          hudson_model.SUCCESS['name'],
                                          hudson_model.UNSTABLE['name'],
                                          hudson_model.FAILURE['name']]
            thrsh = project_def.get('block-thresholds', False)
            for toptname, txmltag, tvalue in supported_thresholds:
                if thrsh:
                    tvalue = thrsh.get(toptname, tvalue)
                # 'never' means the threshold element is omitted entirely.
                if tvalue.lower() == supported_threshold_values[0]:
                    continue
                if tvalue.upper() not in supported_threshold_values:
                    raise InvalidAttributeError(toptname,
                                                tvalue,
                                                supported_threshold_values)
                th = XML.SubElement(block, txmltag)
                XML.SubElement(th, 'name').text = hudson_model.THRESHOLDS[
                    tvalue.upper()]['name']
                XML.SubElement(th, 'ordinal').text = hudson_model.THRESHOLDS[
                    tvalue.upper()]['ordinal']
                XML.SubElement(th, 'color').text = hudson_model.THRESHOLDS[
                    tvalue.upper()]['color']
                XML.SubElement(th, 'completeBuild').text = "true"
    # If configs is empty, remove the entire tbuilder tree.
    if len(configs) == 0:
        logger.debug("Pruning empty TriggerBuilder tree.")
        xml_parent.remove(tbuilder)
def builders_from(registry, xml_parent, data):
    """yaml: builders-from
    Use builders from another project.
    Requires the Jenkins :jenkins-wiki:`Template Project Plugin
    <Template+Project+Plugin>`.

    :arg str projectName: the name of the other project

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/builders-from.yaml
       :language: yaml
    """
    proxy = XML.SubElement(xml_parent,
                           'hudson.plugins.templateproject.ProxyBuilder')
    name_elem = XML.SubElement(proxy, 'projectName')
    name_elem.text = data
def http_request(registry, xml_parent, data):
    """yaml: http-request
    This plugin sends a http request to an url with some parameters.
    Requires the Jenkins :jenkins-wiki:`HTTP Request Plugin
    <HTTP+Request+Plugin>`.

    :arg str url: Specify an URL to be requested (required)
    :arg str mode: The http mode of the request (default GET)

        :mode values:
            * **GET**
            * **POST**
            * **PUT**
            * **DELETE**
            * **HEAD**
    :arg str content-type: Add 'Content-type: foo' HTTP request headers
        where foo is the http content-type the request is using.
        (default NOT_SET)
    :arg str accept-type: Add 'Accept: foo' HTTP request headers
        where foo is the http content-type to accept (default NOT_SET)

        :content-type and accept-type values:
            * **NOT_SET**
            * **TEXT_HTML**
            * **APPLICATION_JSON**
            * **APPLICATION_TAR**
            * **APPLICATION_ZIP**
            * **APPLICATION_OCTETSTREAM**
    :arg str output-file: Name of the file in which to write response data
        (default '')
    :arg int time-out: Specify a timeout value in seconds (default 0)
    :arg bool console-log: This allows you to turn off writing the response
        body to the log (default false)
    :arg bool pass-build: Should build parameters be passed to the URL
        being called (default false)
    :arg str valid-response-codes: Configure response code to mark an
        execution as success. You can configure simple code such as "200"
        or multiple codes separeted by comma(',') e.g. "200,404,500"
        Interval of codes should be in format From:To e.g. "100:399".
        The default (as if empty) is to fail to 4xx and 5xx.
        That means success from 100 to 399 "100:399"
        To ignore any response code use "100:599". (default '')
    :arg str valid-response-content: If set response must contain this string
        to mark an execution as success (default '')
    :arg str authentication-key: Authentication that will be used before this
        request. Authentications are created in global configuration under a
        key name that is selected here.
    :arg list custom-headers: list of header parameters

        :custom-header:
            * **name** (`str`) -- Name of the header
            * **value** (`str`) -- Value of the header

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/http-request-minimal.yaml
       :language: yaml

    .. literalinclude::
       ../../tests/builders/fixtures/http-request-full.yaml
       :language: yaml
    """
    # Renamed local: the original rebound the function's own name here.
    http_elem = XML.SubElement(xml_parent,
                               'jenkins.plugins.http__request.HttpRequest')
    http_elem.set('plugin', 'http_request')

    valid_modes = ['GET', 'POST', 'PUT', 'DELETE', 'HEAD']
    valid_types = ['NOT_SET', 'TEXT_HTML', 'APPLICATION_JSON',
                   'APPLICATION_TAR', 'APPLICATION_ZIP',
                   'APPLICATION_OCTETSTREAM']
    mappings = [
        ('url', 'url', None),
        ('mode', 'httpMode', 'GET', valid_modes),
        ('content-type', 'contentType', 'NOT_SET', valid_types),
        ('accept-type', 'acceptType', 'NOT_SET', valid_types),
        ('output-file', 'outputFile', ''),
        ('console-log', 'consoleLogResponseBody', False),
        ('pass-build', 'passBuildParameters', False),
        ('time-out', 'timeout', 0),
        ('valid-response-codes', 'validResponseCodes', ''),
        ('valid-response-content', 'validResponseContent', ''),
    ]
    convert_mapping_to_xml(http_elem, data, mappings, fail_required=True)

    if 'authentication-key' in data:
        XML.SubElement(
            http_elem, 'authentication').text = data['authentication-key']
    if 'custom-headers' in data:
        headers_elem = XML.SubElement(http_elem, 'customHeaders')
        header_mappings = [('name', 'name', None),
                           ('value', 'value', None)]
        for header in data['custom-headers']:
            pair = XML.SubElement(headers_elem, 'pair')
            convert_mapping_to_xml(pair, header, header_mappings,
                                   fail_required=True)
def inject(registry, xml_parent, data):
    """yaml: inject
    Inject an environment for the job.
    Requires the Jenkins :jenkins-wiki:`EnvInject Plugin
    <EnvInject+Plugin>`.

    :arg str properties-file: the name of the property file (optional)
    :arg str properties-content: the properties content (optional)
    :arg str script-file: the name of a script file to run (optional)
    :arg str script-content: the script content (optional)

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/inject.yaml
       :language: yaml
    """
    builder = XML.SubElement(xml_parent, 'EnvInjectBuilder')
    info = XML.SubElement(builder, 'info')
    # All four settings are optional, hence fail_required=False.
    convert_mapping_to_xml(
        info,
        data,
        [('properties-file', 'propertiesFilePath', None),
         ('properties-content', 'propertiesContent', None),
         ('script-file', 'scriptFilePath', None),
         ('script-content', 'scriptContent', None)],
        fail_required=False)
def kmap(registry, xml_parent, data):
    """yaml: kmap
    Publish mobile applications to your Keivox KMAP Private Mobile App Store.
    Requires the Jenkins :jenkins-wiki:`Keivox KMAP Private Mobile App Store
    Plugin <Keivox+KMAP+Private+Mobile+App+Store+Plugin>`.

    :arg str username: KMAP's user email with permissions to upload/publish
        applications to KMAP (required)
    :arg str password: Password for the KMAP user uploading/publishing
        applications (required)
    :arg str url: KMAP's url. This url must always end with "/kmap-client/".
        For example: http://testing.keivox.com/kmap-client/ (required)
    :arg str categories: Categories' names. If you want to add the application
        to more than one category, write the categories between commas.
        (required)
    :arg str file-path: Path to the application's file (required)
    :arg str app-name: KMAP's application name (required)
    :arg str bundle: Bundle indentifier (default '')
    :arg str version: Application's version (required)
    :arg str description: Application's description (default '')
    :arg str icon-path: Path to the application's icon (default '')
    :arg bool publish-optional: Publish application after it has been uploaded
        to KMAP (default false)

        :publish-optional:
            * **groups** ('str') -- groups' names to publish the application
              (default '')
            * **users** ('str') -- users' names to publish the application
              (default '')
            * **notify-users** ('bool') -- Send notifications to the users and
              groups when publishing the application (default false)

    Minimal Example:

    .. literalinclude:: ../../tests/builders/fixtures/kmap-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude:: ../../tests/builders/fixtures/kmap-full.yaml
       :language: yaml
    """
    kmap_elem = XML.SubElement(
        xml_parent, 'org.jenkinsci.plugins.KmapJenkinsBuilder')
    kmap_elem.set('plugin', 'kmap-jenkins')

    base_mapping = [
        ('username', 'username', None),
        ('password', 'password', None),
        ('url', 'kmapClient', None),
        ('categories', 'categories', None),
        ('file-path', 'filePath', None),
        ('app-name', 'appName', None),
        ('bundle', 'bundle', ''),
        ('version', 'version', None),
        ('description', 'description', ''),
        ('icon-path', 'iconPath', ''),
    ]
    convert_mapping_to_xml(kmap_elem, data, base_mapping, fail_required=True)

    # NOTE(review): the publish sub-settings are read from the top-level
    # `data` dict, not from a nested dict under 'publish-optional' as the
    # docstring layout might suggest — presumably the YAML fixtures keep
    # groups/users/notify-users at the top level; confirm against fixtures.
    if data.get('publish-optional', False) is True:
        publish_elem = XML.SubElement(kmap_elem, 'publishOptional')
        publish_mapping = [
            ('groups', 'teams', ''),
            ('users', 'users', ''),
            ('notify-users', 'sendNotifications', False),
        ]
        convert_mapping_to_xml(
            publish_elem, data, publish_mapping, fail_required=True)
def artifact_resolver(registry, xml_parent, data):
    """yaml: artifact-resolver
    Allows one to resolve artifacts from a maven repository like nexus
    (without having maven installed)
    Requires the Jenkins :jenkins-wiki:`Repository Connector Plugin
    <Repository+Connector+Plugin>`.

    :arg bool fail-on-error: Whether to fail the build on error (default false)
    :arg bool repository-logging: Enable repository logging (default false)
    :arg str target-directory: Where to resolve artifacts to
    :arg list artifacts: list of artifacts to resolve

        :Artifact:
            * **group-id** (`str`) -- Group ID of the artifact
            * **artifact-id** (`str`) -- Artifact ID of the artifact
            * **version** (`str`) -- Version of the artifact
            * **classifier** (`str`) -- Classifier of the artifact (default '')
            * **extension** (`str`) -- Extension of the artifact
              (default 'jar')
            * **target-file-name** (`str`) -- What to name the artifact
              (default '')

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/artifact-resolver.yaml
       :language: yaml
    """
    ar = XML.SubElement(xml_parent,
                        'org.jvnet.hudson.plugins.repositoryconnector.'
                        'ArtifactResolver')
    XML.SubElement(ar, 'targetDirectory').text = data['target-directory']
    artifacts_elem = XML.SubElement(ar, 'artifacts')
    for spec in data['artifacts']:
        entry = XML.SubElement(artifacts_elem,
                               'org.jvnet.hudson.plugins.'
                               'repositoryconnector.Artifact')
        XML.SubElement(entry, 'groupId').text = spec['group-id']
        XML.SubElement(entry, 'artifactId').text = spec['artifact-id']
        XML.SubElement(entry, 'classifier').text = spec.get('classifier', '')
        XML.SubElement(entry, 'version').text = spec['version']
        XML.SubElement(entry, 'extension').text = spec.get('extension', 'jar')
        XML.SubElement(entry, 'targetFileName').text = spec.get(
            'target-file-name', '')
    XML.SubElement(ar, 'failOnError').text = str(
        data.get('fail-on-error', False)).lower()
    XML.SubElement(ar, 'enableRepoLogging').text = str(
        data.get('repository-logging', False)).lower()
    # Fixed update/checksum policies emitted verbatim by this builder.
    for tag, value in (('snapshotUpdatePolicy', 'never'),
                       ('releaseUpdatePolicy', 'never'),
                       ('snapshotChecksumPolicy', 'warn'),
                       ('releaseChecksumPolicy', 'warn')):
        XML.SubElement(ar, tag).text = value
def doxygen(registry, xml_parent, data):
    """yaml: doxygen

    Builds doxygen HTML documentation. Requires the Jenkins
    :jenkins-wiki:`Doxygen plugin <Doxygen+Plugin>`.

    :arg str doxyfile: The doxyfile path (required)
    :arg str install: The doxygen installation to use (required)
    :arg bool ignore-failure: Keep executing build even on doxygen generation
        failure (default false)
    :arg bool unstable-warning: Mark the build as unstable if warnings are
        generated (default false)

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/doxygen001.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'hudson.plugins.doxygen.DoxygenBuilder')
    mapping = [
        ('doxyfile', 'doxyfilePath', None),
        ('install', 'installationName', None),
        ('ignore-failure', 'continueOnBuildFailure', False),
        ('unstable-warning', 'unstableIfWarnings', False),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
def gradle(registry, xml_parent, data):
    """yaml: gradle

    Execute gradle tasks. Requires the Jenkins :jenkins-wiki:`Gradle Plugin
    <Gradle+Plugin>`.

    :arg str tasks: List of tasks to execute
    :arg str gradle-name: Use a custom gradle name (default '')
    :arg bool wrapper: use gradle wrapper (default false)
    :arg bool executable: make gradlew executable (default false)
    :arg list switches: Switches for gradle, can have multiples
    :arg bool use-root-dir: Whether to run the gradle script from the
        top level directory or from a different location (default false)
    :arg str root-build-script-dir: If your workspace has the
        top-level build.gradle in somewhere other than the module
        root directory, specify the path (relative to the module
        root) here, such as ${workspace}/parent/ instead of just
        ${workspace}.
    :arg str build-file: name of gradle build script (default 'build.gradle')

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/gradle.yaml
       :language: yaml
    """
    builder = XML.SubElement(xml_parent, 'hudson.plugins.gradle.Gradle')
    # The plugin always persists a description element, even when empty.
    XML.SubElement(builder, 'description').text = ''
    mapping = [
        ('build-file', 'buildFile', 'build.gradle'),
        ('tasks', 'tasks', None),
        ('root-build-script-dir', 'rootBuildScriptDir', ''),
        ('gradle-name', 'gradleName', ''),
        ('wrapper', 'useWrapper', False),
        ('executable', 'makeExecutable', False),
        ('use-root-dir', 'fromRootBuildScriptDir', False),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
    # Switches are newline-joined into a single text node.
    switches = data.get('switches', [])
    XML.SubElement(builder, 'switches').text = '\n'.join(switches)
def _groovy_common_scriptSource(data):
"""Helper function to generate the XML element common to groovy builders
"""
scriptSource = XML.Element("scriptSource")
if 'command' in data and 'file' in data:
raise JenkinsJobsException("Use just one of 'command' or 'file'")
if 'command' in data:
command = XML.SubElement(scriptSource, 'command')
command.text = str(data['command'])
scriptSource.set('class', 'hudson.plugins.groovy.StringScriptSource')
elif 'file' in data:
scriptFile = XML.SubElement(scriptSource, 'scriptFile')
scriptFile.text = str(data['file'])
scriptSource.set('class', 'hudson.plugins.groovy.FileScriptSource')
else:
raise JenkinsJobsException("A groovy command or file is required")
return scriptSource
def groovy(registry, xml_parent, data):
    """yaml: groovy

    Execute a groovy script or command.
    Requires the Jenkins :jenkins-wiki:`Groovy Plugin <Groovy+plugin>`.

    :arg str file: Groovy file to run. (Alternative: you can chose a command
        instead)
    :arg str command: Groovy command to run. (Alternative: you can chose a
        script file instead)
    :arg str version: Groovy version to use. (default '(Default)')
    :arg str parameters: Parameters for the Groovy executable. (default '')
    :arg str script-parameters: These parameters will be passed to the script.
        (default '')
    :arg str properties: Instead of passing properties using the -D parameter
        you can define them here. (default '')
    :arg str java-opts: Direct access to JAVA_OPTS. Properties allows only
        -D properties, while sometimes also other properties like -XX need to
        be setup. It can be done here. This line is appended at the end of
        JAVA_OPTS string. (default '')
    :arg str class-path: Specify script classpath here. Each line is one
        class path item. (default '')

    Minimal Example:

    .. literalinclude:: ../../tests/builders/fixtures/groovy-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude:: ../../tests/builders/fixtures/groovy-full.yaml
       :language: yaml
    """
    builder = XML.SubElement(xml_parent, 'hudson.plugins.groovy.Groovy')
    # The script source (inline command vs. file) is shared with
    # system-groovy and built by the common helper.
    builder.append(_groovy_common_scriptSource(data))
    mapping = [
        ('version', 'groovyName', '(Default)'),
        ('parameters', 'parameters', ''),
        ('script-parameters', 'scriptParameters', ''),
        ('properties', 'properties', ''),
        ('java-opts', 'javaOpts', ''),
        ('class-path', 'classPath', ''),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
def system_groovy(registry, xml_parent, data):
    """yaml: system-groovy

    Execute a system groovy script or command.
    Requires the Jenkins :jenkins-wiki:`Groovy Plugin <Groovy+plugin>`.

    :arg str file: Groovy file to run. (Alternative: you can chose a command
        instead)
    :arg str command: Groovy command to run. (Alternative: you can chose a
        script file instead)
    :arg str bindings: Define variable bindings (in the properties file
        format). Specified variables can be addressed from the script.
        (optional)
    :arg str class-path: Specify script classpath here. Each line is one class
        path item. (optional)

    Examples:

    .. literalinclude:: ../../tests/builders/fixtures/system-groovy001.yaml
       :language: yaml

    .. literalinclude:: ../../tests/builders/fixtures/system-groovy002.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'hudson.plugins.groovy.SystemGroovy')
    # Same command-vs-file source element as the plain groovy builder.
    builder.append(_groovy_common_scriptSource(data))
    mapping = [
        ('bindings', 'bindings', ''),
        ('class-path', 'classpath', ''),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
def batch(registry, xml_parent, data):
    """yaml: batch

    Execute a batch command.

    :Parameter: the batch command to execute

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/batch.yaml
       :language: yaml
    """
    step = XML.SubElement(xml_parent, 'hudson.tasks.BatchFile')
    XML.SubElement(step, 'command').text = data
def powershell(registry, xml_parent, data):
    """yaml: powershell

    Execute a powershell command. Requires the :jenkins-wiki:`Powershell Plugin
    <PowerShell+Plugin>`.

    :Parameter: the powershell command to execute

    Example:

    .. literalinclude:: ../../tests/builders/fixtures/powershell.yaml
       :language: yaml
    """
    step = XML.SubElement(
        xml_parent, 'hudson.plugins.powershell.PowerShell')
    XML.SubElement(step, 'command').text = data
def msbuild(registry, xml_parent, data):
    """yaml: msbuild

    Build .NET project using msbuild. Requires the :jenkins-wiki:`Jenkins
    MSBuild Plugin <MSBuild+Plugin>`.

    :arg str msbuild-version: which msbuild configured in Jenkins to use
        (default '(Default)')
    :arg str solution-file: location of the solution file to build (required)
    :arg str extra-parameters: extra parameters to pass to msbuild (default '')
    :arg bool pass-build-variables: should build variables be passed
        to msbuild (default true)
    :arg bool continue-on-build-failure: should the build continue if
        msbuild returns an error (default false)
    :arg bool unstable-if-warnings: If set to true and warnings on compilation,
        the build will be unstable (>=1.20) (default false)

    Full Example:

    .. literalinclude:: ../../tests/builders/fixtures/msbuild-full.yaml
       :language: yaml

    Minimal Example:

    .. literalinclude:: ../../tests/builders/fixtures/msbuild-minimal.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'hudson.plugins.msbuild.MsBuildBuilder')
    builder.set('plugin', 'msbuild')
    mapping = [
        ('msbuild-version', 'msBuildName', '(Default)'),
        ('solution-file', 'msBuildFile', None),
        ('extra-parameters', 'cmdLineArgs', ''),
        ('pass-build-variables', 'buildVariablesAsProperties', True),
        ('continue-on-build-failure', 'continueOnBuildFailure', False),
        ('unstable-if-warnings', 'unstableIfWarnings', False),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
def create_builders(registry, step):
    # Dispatch the step into a throwaway parent element, then hand back
    # the generated children so callers can graft them elsewhere
    # (e.g. inside a conditional-step wrapper).
    holder = XML.Element("dummy")
    registry.dispatch('builder', holder, step)
    return list(holder)
def conditional_step(registry, xml_parent, data):
    """yaml: conditional-step

    Conditionally execute some build steps. Requires the Jenkins
    :jenkins-wiki:`Conditional BuildStep Plugin
    <Conditional+BuildStep+Plugin>`.

    Depending on the number of declared steps, a `Conditional step (single)`
    or a `Conditional steps (multiple)` is created in Jenkins.

    :arg str condition-kind: Condition kind that must be verified before the
        steps are executed. Valid values and their additional attributes are
        described in the conditions_ table.
    :arg str on-evaluation-failure: What should be the outcome of the build
        if the evaluation of the condition fails. Possible values are `fail`,
        `mark-unstable`, `run-and-mark-unstable`, `run` and `dont-run`.
        (default 'fail').
    :arg list steps: List of steps to run if the condition is verified. Items
        in the list can be any builder known by Jenkins Job Builder.

    .. _conditions:

    ================== ====================================================
    Condition kind     Description
    ================== ====================================================
    always             Condition is always verified
    never              Condition is never verified
    boolean-expression Run the step if the expression expends to a
                       representation of true

                         :condition-expression: Expression to expand
                           (required)
    build-cause        Run if the current build has a specific cause

                         :cause: The cause why the build was triggered.
                           Following causes are supported -

                           :USER_CAUSE: build was triggered by a manual
                             interaction. (default)
                           :SCM_CAUSE: build was triggered by a SCM change.
                           :TIMER_CAUSE: build was triggered by a timer.
                           :CLI_CAUSE: build was triggered by via CLI interface
                           :REMOTE_CAUSE: build was triggered via remote
                             interface.
                           :UPSTREAM_CAUSE: build was triggered by an upstream
                             project.

                           Following supported if XTrigger plugin installed:

                           :FS_CAUSE: build was triggered by a file system
                             change (FSTrigger Plugin).
                           :URL_CAUSE: build was triggered by a URL change
                             (URLTrigger Plugin)
                           :IVY_CAUSE: build triggered by an Ivy dependency
                             version has change (IvyTrigger Plugin)
                           :SCRIPT_CAUSE: build was triggered by a script
                             (ScriptTrigger Plugin)
                           :BUILDRESULT_CAUSE: build was triggered by a
                             result of an other job (BuildResultTrigger Plugin)
                         :exclusive-cause: (bool) There might be multiple
                           causes causing a build to be triggered, with
                           this true, the cause must be the only one
                           causing this build to be triggered.
                           (default false)
    day-of-week        Only run on specific days of the week.

                         :day-selector: Days you want the build to run on.
                           Following values are supported -

                           :weekend: Saturday and Sunday (default).
                           :weekday: Monday - Friday.
                           :select-days: Selected days, defined by 'days'
                             below.
                           :days: True for days for which the build should
                             run. Definition needed only for 'select-days'
                             day-selector, at the same level as day-selector.
                             Define the days to run under this.

                             :SUN: Run on Sunday (default false)
                             :MON: Run on Monday (default false)
                             :TUES: Run on Tuesday (default false)
                             :WED: Run on Wednesday (default false)
                             :THURS: Run on Thursday (default false)
                             :FRI: Run on Friday (default false)
                             :SAT: Run on Saturday (default false)
                         :use-build-time: (bool) Use the build time instead of
                           the time that the condition is evaluated.
                           (default false)
    execution-node     Run only on selected nodes.

                         :nodes: (list) List of nodes to execute on. (required)
    strings-match      Run the step if two strings match

                         :condition-string1: First string (optional)
                         :condition-string2: Second string (optional)
                         :condition-case-insensitive: Case insensitive
                           (default false)
    current-status     Run the build step if the current build status is
                       within the configured range

                         :condition-worst: Accepted values are SUCCESS,
                           UNSTABLE, FAILURE, NOT_BUILD, ABORTED
                           (default SUCCESS)
                         :condition-best: Accepted values are SUCCESS,
                           UNSTABLE, FAILURE, NOT_BUILD, ABORTED
                           (default SUCCESS)
    shell              Run the step if the shell command succeed

                         :condition-command: Shell command to execute
                           (optional)
    windows-shell      Similar to shell, except that commands will be
                       executed by cmd, under Windows

                         :condition-command: Command to execute (optional)
    file-exists        Run the step if a file exists

                         :condition-filename: Check existence of this file
                           (required)
                         :condition-basedir: If condition-filename is
                           relative, it will be considered relative to
                           either `workspace`, `artifact-directory`,
                           or `jenkins-home`. (default 'workspace')
    files-match        Run if one or more files match the selectors.

                         :include-pattern: (list str) List of Includes
                           Patterns. Since the separator in the patterns is
                           hardcoded as ',', any use of ',' would need
                           escaping. (optional)
                         :exclude-pattern: (list str) List of Excludes
                           Patterns. Since the separator in the patterns is
                           hardcoded as ',', any use of ',' would need
                           escaping. (optional)
                         :condition-basedir: Accepted values are `workspace`,
                           `artifact-directory`, or `jenkins-home`.
                           (default 'workspace')
    num-comp           Run if the numerical comparison is true.

                         :lhs: Left Hand Side. Must evaluate to a number.
                           (required)
                         :rhs: Right Hand Side. Must evaluate to a number.
                           (required)
                         :comparator: Accepted values are `less-than`,
                           `greater-than`, `equal`, `not-equal`,
                           `less-than-equal`, `greater-than-equal`.
                           (default 'less-than')
    regex-match        Run if the Expression matches the Label.

                         :regex: The regular expression used to match the
                           label (optional)
                         :label: The label that will be tested by the regular
                           expression. (optional)
    time               Only run during a certain period of the day.

                         :earliest-hour: Starting hour (default "09")
                         :earliest-min: Starting min (default "00")
                         :latest-hour: Ending hour (default "17")
                         :latest-min: Ending min (default "30")
                         :use-build-time: (bool) Use the build time instead of
                           the time that the condition is evaluated.
                           (default false)
    not                Run the step if the inverse of the condition-operand
                       is true

                         :condition-operand: Condition to evaluate. Can be
                           any supported conditional-step condition. (required)
    and                Run the step if logical and of all conditional-operands
                       is true

                         :condition-operands: (list) Conditions to evaluate.
                           Can be any supported conditional-step condition.
                           (required)
    or                 Run the step if logical or of all conditional-operands
                       is true

                         :condition-operands: (list) Conditions to evaluate.
                           Can be any supported conditional-step condition.
                           (required)
    ================== ====================================================

    Examples:

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-multiple-steps.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-success-failure.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-not-file-exists.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-day-of-week001.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-day-of-week003.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-time.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-regex-match.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-or.yaml
       :language: yaml

    .. literalinclude::
        /../../tests/builders/fixtures/conditional-step-and.yaml
       :language: yaml
    """
    def build_condition(cdata, cond_root_tag, condition_tag):
        # Emit the run-condition element for one condition dict. Called
        # recursively for the 'not', 'and' and 'or' logic kinds.
        kind = cdata['condition-kind']
        ctag = XML.SubElement(cond_root_tag, condition_tag)
        core_prefix = 'org.jenkins_ci.plugins.run_condition.core.'
        logic_prefix = 'org.jenkins_ci.plugins.run_condition.logic.'
        if kind == "always":
            ctag.set('class', core_prefix + 'AlwaysRun')
        elif kind == "never":
            ctag.set('class', core_prefix + 'NeverRun')
        elif kind == "boolean-expression":
            ctag.set('class', core_prefix + 'BooleanCondition')
            try:
                XML.SubElement(ctag, "token").text = (
                    cdata['condition-expression'])
            except KeyError:
                raise MissingAttributeError('condition-expression')
        elif kind == "build-cause":
            ctag.set('class', core_prefix + 'CauseCondition')
            cause_list = ('USER_CAUSE', 'SCM_CAUSE', 'TIMER_CAUSE',
                          'CLI_CAUSE', 'REMOTE_CAUSE', 'UPSTREAM_CAUSE',
                          'FS_CAUSE', 'URL_CAUSE', 'IVY_CAUSE',
                          'SCRIPT_CAUSE', 'BUILDRESULT_CAUSE')
            cause_name = cdata.get('cause', 'USER_CAUSE')
            if cause_name not in cause_list:
                raise InvalidAttributeError('cause', cause_name, cause_list)
            XML.SubElement(ctag, "buildCause").text = cause_name
            XML.SubElement(ctag, "exclusiveCause").text = str(cdata.get(
                'exclusive-cause', False)).lower()
        elif kind == "day-of-week":
            ctag.set('class', core_prefix + 'DayCondition')
            day_selector_class_prefix = core_prefix + 'DayCondition$'
            day_selector_classes = {
                'weekend': day_selector_class_prefix + 'Weekend',
                'weekday': day_selector_class_prefix + 'Weekday',
                'select-days': day_selector_class_prefix + 'SelectDays',
            }
            day_selector = cdata.get('day-selector', 'weekend')
            if day_selector not in day_selector_classes:
                raise InvalidAttributeError('day-selector', day_selector,
                                            day_selector_classes)
            day_selector_tag = XML.SubElement(ctag, "daySelector")
            day_selector_tag.set('class', day_selector_classes[day_selector])
            if day_selector == "select-days":
                days_tag = XML.SubElement(day_selector_tag, "days")
                day_tag_text = ('org.jenkins__ci.plugins.run__condition.'
                                'core.DayCondition_-Day')
                inp_days = cdata.get('days') or {}
                days = ['SUN', 'MON', 'TUES', 'WED', 'THURS', 'FRI', 'SAT']
                # The plugin numbers days 1..7 starting at Sunday.
                for day_no, day in enumerate(days, 1):
                    day_tag = XML.SubElement(days_tag, day_tag_text)
                    XML.SubElement(day_tag, "day").text = str(day_no)
                    XML.SubElement(day_tag, "selected").text = str(
                        inp_days.get(day, False)).lower()
            XML.SubElement(ctag, "useBuildTime").text = str(cdata.get(
                'use-build-time', False)).lower()
        elif kind == "execution-node":
            ctag.set('class', core_prefix + 'NodeCondition')
            allowed_nodes_tag = XML.SubElement(ctag, "allowedNodes")
            try:
                nodes_list = cdata['nodes']
            except KeyError:
                raise MissingAttributeError('nodes')
            for node in nodes_list:
                node_tag = XML.SubElement(allowed_nodes_tag, "string")
                node_tag.text = node
        elif kind == "strings-match":
            ctag.set('class', core_prefix + 'StringsMatchCondition')
            XML.SubElement(ctag, "arg1").text = cdata.get(
                'condition-string1', '')
            XML.SubElement(ctag, "arg2").text = cdata.get(
                'condition-string2', '')
            XML.SubElement(ctag, "ignoreCase").text = str(cdata.get(
                'condition-case-insensitive', False)).lower()
        elif kind == "current-status":
            ctag.set('class', core_prefix + 'StatusCondition')
            wr = XML.SubElement(ctag, 'worstResult')
            wr_name = cdata.get('condition-worst', 'SUCCESS')
            if wr_name not in hudson_model.THRESHOLDS:
                raise InvalidAttributeError('condition-worst', wr_name,
                                            hudson_model.THRESHOLDS.keys())
            wr_threshold = hudson_model.THRESHOLDS[wr_name]
            XML.SubElement(wr, "name").text = wr_threshold['name']
            XML.SubElement(wr, "ordinal").text = wr_threshold['ordinal']
            XML.SubElement(wr, "color").text = wr_threshold['color']
            XML.SubElement(wr, "completeBuild").text = str(
                wr_threshold['complete']).lower()
            br = XML.SubElement(ctag, 'bestResult')
            br_name = cdata.get('condition-best', 'SUCCESS')
            if br_name not in hudson_model.THRESHOLDS:
                raise InvalidAttributeError('condition-best', br_name,
                                            hudson_model.THRESHOLDS.keys())
            br_threshold = hudson_model.THRESHOLDS[br_name]
            XML.SubElement(br, "name").text = br_threshold['name']
            XML.SubElement(br, "ordinal").text = br_threshold['ordinal']
            XML.SubElement(br, "color").text = br_threshold['color']
            # Fixed: previously used wr_threshold here, so the bestResult
            # completeBuild flag was taken from the worstResult threshold.
            XML.SubElement(br, "completeBuild").text = str(
                br_threshold['complete']).lower()
        elif kind == "shell":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.contributed.'
                     'ShellCondition')
            XML.SubElement(ctag, "command").text = cdata.get(
                'condition-command', '')
        elif kind == "windows-shell":
            ctag.set('class',
                     'org.jenkins_ci.plugins.run_condition.contributed.'
                     'BatchFileCondition')
            XML.SubElement(ctag, "command").text = cdata.get(
                'condition-command', '')
        elif kind == "file-exists" or kind == "files-match":
            if kind == "file-exists":
                ctag.set('class', core_prefix + 'FileExistsCondition')
                try:
                    XML.SubElement(ctag, "file").text = (
                        cdata['condition-filename'])
                except KeyError:
                    raise MissingAttributeError('condition-filename')
            else:
                ctag.set('class', core_prefix + 'FilesMatchCondition')
                XML.SubElement(ctag, "includes").text = ",".join(cdata.get(
                    'include-pattern', ''))
                XML.SubElement(ctag, "excludes").text = ",".join(cdata.get(
                    'exclude-pattern', ''))
            basedir_class_prefix = ('org.jenkins_ci.plugins.run_condition.'
                                    'common.BaseDirectory$')
            basedir_classes = {
                'workspace': basedir_class_prefix + 'Workspace',
                'artifact-directory': basedir_class_prefix + 'ArtifactsDir',
                'jenkins-home': basedir_class_prefix + 'JenkinsHome'
            }
            basedir = cdata.get('condition-basedir', 'workspace')
            if basedir not in basedir_classes:
                raise InvalidAttributeError('condition-basedir', basedir,
                                            basedir_classes)
            XML.SubElement(ctag, "baseDir").set('class',
                                                basedir_classes[basedir])
        elif kind == "num-comp":
            ctag.set('class', core_prefix + 'NumericalComparisonCondition')
            try:
                XML.SubElement(ctag, "lhs").text = cdata['lhs']
                XML.SubElement(ctag, "rhs").text = cdata['rhs']
            except KeyError as e:
                raise MissingAttributeError(e.args[0])
            comp_class_prefix = core_prefix + 'NumericalComparisonCondition$'
            comp_classes = {
                'less-than': comp_class_prefix + 'LessThan',
                'greater-than': comp_class_prefix + 'GreaterThan',
                'equal': comp_class_prefix + 'EqualTo',
                'not-equal': comp_class_prefix + 'NotEqualTo',
                'less-than-equal': comp_class_prefix + 'LessThanOrEqualTo',
                'greater-than-equal': comp_class_prefix +
                'GreaterThanOrEqualTo'
            }
            comp = cdata.get('comparator', 'less-than')
            if comp not in comp_classes:
                raise InvalidAttributeError('comparator', comp, comp_classes)
            XML.SubElement(ctag, "comparator").set('class',
                                                   comp_classes[comp])
        elif kind == "regex-match":
            ctag.set('class', core_prefix + 'ExpressionCondition')
            XML.SubElement(ctag, "expression").text = cdata.get('regex', '')
            XML.SubElement(ctag, "label").text = cdata.get('label', '')
        elif kind == "time":
            ctag.set('class', core_prefix + 'TimeCondition')
            XML.SubElement(ctag, "earliestHours").text = cdata.get(
                'earliest-hour', '09')
            XML.SubElement(ctag, "earliestMinutes").text = cdata.get(
                'earliest-min', '00')
            XML.SubElement(ctag, "latestHours").text = cdata.get(
                'latest-hour', '17')
            XML.SubElement(ctag, "latestMinutes").text = cdata.get(
                'latest-min', '30')
            XML.SubElement(ctag, "useBuildTime").text = str(cdata.get(
                'use-build-time', False)).lower()
        elif kind == "not":
            ctag.set('class', logic_prefix + 'Not')
            try:
                notcondition = cdata['condition-operand']
            except KeyError:
                raise MissingAttributeError('condition-operand')
            build_condition(notcondition, ctag, "condition")
        # Fixed: was `kind == "and" or "or"`, which is always truthy (a
        # non-empty string literal), so every unrecognized kind silently
        # became an 'Or' condition.
        elif kind in ("and", "or"):
            if kind == "and":
                ctag.set('class', logic_prefix + 'And')
            else:
                ctag.set('class', logic_prefix + 'Or')
            conditions_tag = XML.SubElement(ctag, "conditions")
            container_tag_text = ('org.jenkins__ci.plugins.run__condition.'
                                  'logic.ConditionContainer')
            try:
                conditions_list = cdata['condition-operands']
            except KeyError:
                raise MissingAttributeError('condition-operands')
            for condition in conditions_list:
                conditions_container_tag = XML.SubElement(conditions_tag,
                                                          container_tag_text)
                build_condition(condition, conditions_container_tag,
                                "condition")
        else:
            # Reject unknown condition kinds instead of producing a
            # silently wrong configuration.
            raise InvalidAttributeError('condition-kind', kind)

    def build_step(parent, step):
        # Render one inner builder. In single-step mode the generated
        # element is renamed to <buildStep class="original-tag"/>.
        for edited_node in create_builders(registry, step):
            if not has_multiple_steps:
                edited_node.set('class', edited_node.tag)
                edited_node.tag = 'buildStep'
            parent.append(edited_node)

    cond_builder_tag = ('org.jenkinsci.plugins.conditionalbuildstep.'
                        'singlestep.SingleConditionalBuilder')
    cond_builders_tag = ('org.jenkinsci.plugins.conditionalbuildstep.'
                        'ConditionalBuilder')
    steps = data['steps']
    has_multiple_steps = len(steps) > 1
    if has_multiple_steps:
        root_tag = XML.SubElement(xml_parent, cond_builders_tag)
        steps_parent = XML.SubElement(root_tag, "conditionalbuilders")
        condition_tag = "runCondition"
    else:
        root_tag = XML.SubElement(xml_parent, cond_builder_tag)
        steps_parent = root_tag
        condition_tag = "condition"
    build_condition(data, root_tag, condition_tag)
    evaluation_classes_pkg = 'org.jenkins_ci.plugins.run_condition'
    evaluation_classes = {
        'fail': evaluation_classes_pkg + '.BuildStepRunner$Fail',
        'mark-unstable': evaluation_classes_pkg + '.BuildStepRunner$Unstable',
        'run-and-mark-unstable': evaluation_classes_pkg +
        '.BuildStepRunner$RunUnstable',
        'run': evaluation_classes_pkg + '.BuildStepRunner$Run',
        'dont-run': evaluation_classes_pkg + '.BuildStepRunner$DontRun',
    }
    evaluation_class = evaluation_classes[data.get('on-evaluation-failure',
                                                   'fail')]
    XML.SubElement(root_tag, "runner").set('class',
                                           evaluation_class)
    for step in steps:
        build_step(steps_parent, step)
def maven_builder(registry, xml_parent, data):
    """yaml: maven-builder

    Execute Maven3 builder

    Allows your build jobs to deploy artifacts automatically to Artifactory.

    Requires the Jenkins :jenkins-wiki:`Artifactory Plugin
    <Artifactory+Plugin>`.

    :arg str name: Name of maven installation from the configuration (required)
    :arg str pom: Location of pom.xml (default 'pom.xml')
    :arg str goals: Goals to execute (required)
    :arg str maven-opts: Additional options for maven (default '')

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/maven-builder001.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'org.jfrog.hudson.maven3.Maven3Builder')
    mapping = [
        ('name', 'mavenName', None),
        ('goals', 'goals', None),
        ('pom', 'rootPom', 'pom.xml'),
        ('maven-opts', 'mavenOpts', ''),
    ]
    convert_mapping_to_xml(builder, data, mapping, fail_required=True)
def maven_target(registry, xml_parent, data):
    """yaml: maven-target

    Execute top-level Maven targets.

    Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
    <Config+File+Provider+Plugin>` for the Config File Provider "settings"
    and "global-settings" config.

    :arg str goals: Goals to execute
    :arg str properties: Properties for maven, can have multiples
    :arg str pom: Location of pom.xml (default 'pom.xml')
    :arg bool private-repository: Use private maven repository for this
        job (default false)
    :arg str maven-version: Installation of maven which should be used
        (optional)
    :arg str java-opts: java options for maven, can have multiples,
        must be in quotes (optional)
    :arg str settings: Path to use as user settings.xml
        It is possible to provide a ConfigFileProvider settings file, such as
        see CFP Example below. (optional)
    :arg str settings-type: Type of settings file file|cfp. (default file)
    :arg str global-settings: Path to use as global settings.xml
        It is possible to provide a ConfigFileProvider settings file, such as
        see CFP Example below. (optional)
    :arg str global-settings-type: Type of settings file file|cfp. (default
        file)

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/maven-target-doc.yaml
       :language: yaml

    CFP Example:

    .. literalinclude:: /../../tests/builders/fixtures/maven-target002.yaml
       :language: yaml
    """
    maven = XML.SubElement(xml_parent, 'hudson.tasks.Maven')
    XML.SubElement(maven, 'targets').text = data['goals']
    XML.SubElement(maven, 'properties').text = '\n'.join(
        data.get('properties', []))
    if 'maven-version' in data:
        XML.SubElement(maven, 'mavenName').text = str(data['maven-version'])
    if 'pom' in data:
        XML.SubElement(maven, 'pom').text = str(data['pom'])
    XML.SubElement(maven, 'usePrivateRepository').text = str(
        data.get('private-repository', False)).lower()
    if 'java-opts' in data:
        XML.SubElement(maven, 'jvmOptions').text = ' '.join(
            data.get('java-opts', []))
    # Delegates settings.xml / global settings handling to the shared helper.
    config_file_provider_settings(maven, data)
def multijob(registry, xml_parent, data):
    """yaml: multijob

    Define a multijob phase. Requires the Jenkins
    :jenkins-wiki:`Multijob Plugin <Multijob+Plugin>`.

    This builder may only be used in
    :py:class:`jenkins_jobs.modules.project_multijob.MultiJob` projects.

    :arg str name: MultiJob phase name
    :arg str condition: when to trigger the other job.
        Can be: 'SUCCESSFUL', 'UNSTABLE', 'COMPLETED', 'FAILURE', 'ALWAYS'.
        (default 'SUCCESSFUL')
    :arg list projects: list of projects to include in the MultiJob phase

        :Project:
            * **name** (`str`) -- Project name
            * **current-parameters** (`bool`) -- Pass current build
              parameters to the other job (default false)
            * **node-label-name** (`str`) -- Define a list of nodes
              on which the job should be allowed to be executed on.
              Requires NodeLabel Parameter Plugin (optional)
            * **node-label** (`str`) -- Define a label
              of 'Restrict where this project can be run' on the fly.
              Requires NodeLabel Parameter Plugin (optional)
            * **node-parameters** (`bool`) -- Use the same Node for
              the triggered builds that was used for this build. (optional)
            * **git-revision** (`bool`) -- Pass current git-revision
              to the other job (default false)
            * **property-file** (`str`) -- Pass properties from file
              to the other job (optional)
            * **predefined-parameters** (`str`) -- Pass predefined
              parameters to the other job (optional)
            * **abort-all-job** (`bool`) -- Kill all subjobs and the phase
              job, if this subjob is killed (default false)
            * **enable-condition** (`str`) -- Condition to run the
              job in groovy script format (optional)
            * **kill-phase-on** (`str`) -- Stop the phase execution
              on specific job status. Can be 'FAILURE', 'UNSTABLE',
              'NEVER'. (optional)
            * **restrict-matrix-project** (`str`) -- Filter that
              restricts the subset of the combinations that the
              downstream project will run (optional)
            * **retry** (`dict`): Enable retry strategy (optional)

                :retry:
                    * **max-retry** (`int`) -- Max number of retries
                      (default 0)
                    * **strategy-path** (`str`) -- Parsing rules path
                      (required)

    Example:

    .. literalinclude:: /../../tests/builders/fixtures/multibuild.yaml
       :language: yaml
    """
    builder = XML.SubElement(xml_parent, 'com.tikal.jenkins.plugins.multijob.'
                                         'MultiJobBuilder')
    XML.SubElement(builder, 'phaseName').text = data['name']

    condition = data.get('condition', 'SUCCESSFUL')
    conditions_available = ('SUCCESSFUL', 'UNSTABLE', 'COMPLETED', 'FAILURE',
                            'ALWAYS')
    if condition not in conditions_available:
        raise JenkinsJobsException('Multijob condition must be one of: %s.'
                                   % ', '.join(conditions_available))
    XML.SubElement(builder, 'continuationCondition').text = condition

    phaseJobs = XML.SubElement(builder, 'phaseJobs')

    kill_status_list = ('FAILURE', 'UNSTABLE', 'NEVER')

    for project in data.get('projects', []):
        phaseJob = XML.SubElement(phaseJobs, 'com.tikal.jenkins.plugins.'
                                             'multijob.PhaseJobsConfig')

        XML.SubElement(phaseJob, 'jobName').text = project['name']

        # Pass through the current build params
        currParams = str(project.get('current-parameters', False)).lower()
        XML.SubElement(phaseJob, 'currParams').text = currParams

        # Pass through other params
        configs = XML.SubElement(phaseJob, 'configs')

        nodeLabelName = project.get('node-label-name')
        nodeLabel = project.get('node-label')
        # NodeLabel parameter is only emitted when BOTH keys are present.
        if (nodeLabelName and nodeLabel):
            node = XML.SubElement(
                configs, 'org.jvnet.jenkins.plugins.nodelabelparameter.'
                'parameterizedtrigger.NodeLabelBuildParameter')
            XML.SubElement(node, 'name').text = nodeLabelName
            XML.SubElement(node, 'nodeLabel').text = nodeLabel

        # Node parameter
        if project.get('node-parameters', False):
            XML.SubElement(configs, 'hudson.plugins.parameterizedtrigger.'
                                    'NodeParameters')

        # Git Revision
        if project.get('git-revision', False):
            param = XML.SubElement(configs,
                                   'hudson.plugins.git.'
                                   'GitRevisionBuildParameters')
            combine = XML.SubElement(param, 'combineQueuedCommits')
            combine.text = 'false'

        # Properties File
        properties_file = project.get('property-file', False)
        if properties_file:
            param = XML.SubElement(configs,
                                   'hudson.plugins.parameterizedtrigger.'
                                   'FileBuildParameters')

            propertiesFile = XML.SubElement(param, 'propertiesFile')
            propertiesFile.text = properties_file

            failOnMissing = XML.SubElement(param, 'failTriggerOnMissing')
            failOnMissing.text = 'true'

        # Predefined Parameters
        predefined_parameters = project.get('predefined-parameters', False)
        if predefined_parameters:
            param = XML.SubElement(configs,
                                   'hudson.plugins.parameterizedtrigger.'
                                   'PredefinedBuildParameters')
            properties = XML.SubElement(param, 'properties')
            properties.text = predefined_parameters

        # Abort all other job
        abortAllJob = str(project.get('abort-all-job', False)).lower()
        XML.SubElement(phaseJob, 'abortAllJob').text = abortAllJob

        # Retry job
        retry = project.get('retry', False)
        if retry:
            try:
                rules_path = str(retry['strategy-path'])
                XML.SubElement(phaseJob, 'parsingRulesPath').text = rules_path
            except KeyError:
                raise MissingAttributeError('strategy-path')
            max_retry = retry.get('max-retry', 0)
            XML.SubElement(phaseJob, 'maxRetries').text = str(int(max_retry))
            XML.SubElement(phaseJob, 'enableRetryStrategy').text = 'true'
        else:
            XML.SubElement(phaseJob, 'enableRetryStrategy').text = 'false'

        # Restrict matrix jobs to a subset
        if project.get('restrict-matrix-project') is not None:
            subset = XML.SubElement(
                configs, 'hudson.plugins.parameterizedtrigger.'
                'matrix.MatrixSubsetBuildParameters')
            XML.SubElement(
                subset, 'filter').text = project['restrict-matrix-project']

        # Enable Condition
        enable_condition = project.get('enable-condition')
        if enable_condition is not None:
            XML.SubElement(
                phaseJob,
                'enableCondition'
            ).text = 'true'
            XML.SubElement(
                phaseJob,
                'condition'
            ).text = enable_condition

        # Kill phase on job status
        kill_status = project.get('kill-phase-on')
        if kill_status is not None:
            kill_status = kill_status.upper()
            if kill_status not in kill_status_list:
                # Fixed: the message previously concatenated with '+'
                # instead of interpolating with '%', leaving a literal
                # '%s' in the error text.
                raise JenkinsJobsException(
                    'multijob kill-phase-on must be one of: %s'
                    % ','.join(kill_status_list))
            XML.SubElement(
                phaseJob,
                'killPhaseOnJobResultCondition'
            ).text = kill_status
def config_file_provider(registry, xml_parent, data):
    """yaml: config-file-provider
    Provide configuration files (i.e., settings.xml for maven etc.)
    which will be copied to the job's workspace.
    Requires the Jenkins :jenkins-wiki:`Config File Provider Plugin
    <Config+File+Provider+Plugin>`.
    :arg list files: List of managed config files made up of three
        parameters
        :files:
            * **file-id** (`str`) -- The identifier for the managed config
              file
            * **target** (`str`) -- Define where the file should be created
              (default '')
            * **variable** (`str`) -- Define an environment variable to be
              used (default '')
    Example:
    .. literalinclude::
        ../../tests/builders/fixtures/config-file-provider01.yaml
       :language: yaml
    """
    step_tag = ('org.jenkinsci.plugins.configfiles.builder.'
                'ConfigFileBuildStep')
    step = XML.SubElement(xml_parent, step_tag)
    step.set('plugin', 'config-file-provider')
    # The actual <managedFiles> structure is shared with the wrapper
    # variant, so it is emitted by the common helper.
    config_file_provider_builder(step, data)
def grails(registry, xml_parent, data):
    """yaml: grails
    Execute a grails build step. Requires the :jenkins-wiki:`Jenkins Grails
    Plugin <Grails+Plugin>`.
    :arg bool use-wrapper: Use a grails wrapper (default false)
    :arg str name: Select a grails installation to use (default '(Default)')
    :arg bool force-upgrade: Run 'grails upgrade --non-interactive'
        first (default false)
    :arg bool non-interactive: append --non-interactive to all build targets
        (default false)
    :arg str targets: Specify target(s) to run separated by spaces (required)
    :arg str server-port: Specify a value for the server.port system
        property (default '')
    :arg str work-dir: Specify a value for the grails.work.dir system
        property (default '')
    :arg str project-dir: Specify a value for the grails.project.work.dir
        system property (default '')
    :arg str base-dir: Specify a path to the root of the Grails
        project (default '')
    :arg str properties: Additional system properties to set (default '')
    :arg bool plain-output: append --plain-output to all build targets
        (default false)
    :arg bool stack-trace: append --stack-trace to all build targets
        (default false)
    :arg bool verbose: append --verbose to all build targets
        (default false)
    :arg bool refresh-dependencies: append --refresh-dependencies to all
        build targets (default false)
    Full Example:
    .. literalinclude:: ../../tests/builders/fixtures/grails-full.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude:: ../../tests/builders/fixtures/grails-minimal.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'com.g2one.hudson.grails.GrailsBuilder')
    builder.set('plugin', 'grails')
    # (yaml key, xml tag, default); the order fixes the XML element order,
    # and the None default makes 'targets' mandatory.
    option_map = [
        ('targets', 'targets', None),
        ('name', 'name', '(Default)'),
        ('work-dir', 'grailsWorkDir', ''),
        ('project-dir', 'projectWorkDir', ''),
        ('base-dir', 'projectBaseDir', ''),
        ('server-port', 'serverPort', ''),
        ('properties', 'properties', ''),
        ('force-upgrade', 'forceUpgrade', False),
        ('non-interactive', 'nonInteractive', False),
        ('use-wrapper', 'useWrapper', False),
        ('plain-output', 'plainOutput', False),
        ('stack-trace', 'stackTrace', False),
        ('verbose', 'verbose', False),
        ('refresh-dependencies', 'refreshDependencies', False),
    ]
    convert_mapping_to_xml(builder, data, option_map, fail_required=True)
def sbt(registry, xml_parent, data):
    """yaml: sbt
    Execute a sbt build step. Requires the Jenkins :jenkins-wiki:`Sbt Plugin
    <sbt+plugin>`.
    :arg str name: Select a sbt installation to use. If no name is
        provided, the first in the list of defined SBT builders will be
        used. (default to first in list)
    :arg str jvm-flags: Parameters to pass to the JVM (default '')
    :arg str actions: Select the sbt tasks to execute (default '')
    :arg str sbt-flags: Add flags to SBT launcher
        (default '-Dsbt.log.noformat=true')
    :arg str subdir-path: Path relative to workspace to run sbt in
        (default '')
    Example:
    .. literalinclude:: ../../tests/builders/fixtures/sbt.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'org.jvnet.hudson.plugins.SbtPluginBuilder')
    # (yaml key, xml tag, default) -- every option is optional.
    option_map = [
        ('name', 'name', ''),
        ('jvm-flags', 'jvmFlags', ''),
        ('sbt-flags', 'sbtFlags', '-Dsbt.log.noformat=true'),
        ('actions', 'actions', ''),
        ('subdir-path', 'subdirPath', ''),
    ]
    convert_mapping_to_xml(builder, data, option_map, fail_required=True)
def critical_block_start(registry, xml_parent, data):
    """yaml: critical-block-start
    Designate the start of a critical block. Must be used in conjuction with
    critical-block-end.
    Must also add a build wrapper (exclusion), specifying the resources that
    control the critical block. Otherwise, this will have no effect.
    Requires Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
    Example:
    .. literalinclude::
        ../../tests/yamlparser/fixtures/critical_block_complete001.yaml
       :language: yaml
    """
    # The step carries no options; an empty marker element is enough.
    XML.SubElement(
        xml_parent,
        'org.jvnet.hudson.plugins.exclusion.CriticalBlockStart',
    ).set('plugin', 'Exclusion')
def critical_block_end(registry, xml_parent, data):
    """yaml: critical-block-end
    Designate the end of a critical block. Must be used in conjuction with
    critical-block-start.
    Must also add a build wrapper (exclusion), specifying the resources that
    control the critical block. Otherwise, this will have no effect.
    Requires Jenkins :jenkins-wiki:`Exclusion Plugin <Exclusion-Plugin>`.
    Example:
    .. literalinclude::
        ../../tests/yamlparser/fixtures/critical_block_complete001.yaml
       :language: yaml
    """
    # The step carries no options; an empty marker element is enough.
    XML.SubElement(
        xml_parent,
        'org.jvnet.hudson.plugins.exclusion.CriticalBlockEnd',
    ).set('plugin', 'Exclusion')
def publish_over_ssh(registry, xml_parent, data):
    """yaml: publish-over-ssh
    Send files or execute commands over SSH.
    Requires the Jenkins :jenkins-wiki:`Publish over SSH Plugin
    <Publish+Over+SSH+Plugin>`.
    :arg str site: name of the ssh site
    :arg str target: destination directory
    :arg bool target-is-date-format: whether target is a date format. If true,
        raw text should be quoted (default false)
    :arg bool clean-remote: should the remote directory be deleted before
        transferring files (default false)
    :arg str source: source path specifier
    :arg str command: a command to execute on the remote server (optional)
    :arg int timeout: timeout in milliseconds for the Exec command (optional)
    :arg bool use-pty: run the exec command in pseudo TTY (default false)
    :arg str excludes: excluded file pattern (optional)
    :arg str remove-prefix: prefix to remove from uploaded file paths
        (optional)
    :arg bool fail-on-error: fail the build if an error occurs (default false)
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/publish-over-ssh.yaml
       :language: yaml
    """
    # Thin alias: all XML generation is delegated to the shared ``ssh``
    # helper, which implements the publish-over-* family of steps.
    ssh(registry, xml_parent, data)
def saltstack(parser, xml_parent, data):
    """yaml: saltstack
    Send a message to Salt API. Requires the :jenkins-wiki:`saltstack plugin
    <saltstack-plugin>`.
    :arg str servername: Salt master server name (required)
    :arg str authtype: Authentication type ('pam' or 'ldap', default 'pam')
    :arg str credentials: Credentials ID for which to authenticate to Salt
        master (required)
    :arg str target: Target minions (default '')
    :arg str targettype: Target type ('glob', 'pcre', 'list', 'grain',
        'pillar', 'nodegroup', 'range', or 'compound', default 'glob')
    :arg str function: Function to execute (default '')
    :arg str arguments: Salt function arguments (default '')
    :arg str kwarguments: Salt keyword arguments (default '')
    :arg bool saveoutput: Save Salt return data into environment variable
        (default false)
    :arg str clientinterface: Client interface type ('local', 'local-batch',
        or 'runner', default 'local')
    :arg bool wait: Wait for completion of command (default false)
    :arg str polltime: Number of seconds to wait before polling job completion
        status (default '')
    :arg str batchsize: Salt batch size, absolute value or %-age (default 100%)
    :arg str mods: Mods to runner (default '')
    :arg bool setpillardata: Set Pillar data (default false)
    :arg str pillarkey: Pillar key (default '')
    :arg str pillarvalue: Pillar value (default '')
    Minimal Example:
    .. literalinclude:: ../../tests/builders/fixtures/saltstack-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude:: ../../tests/builders/fixtures/saltstack-full.yaml
       :language: yaml
    """
    saltstack = XML.SubElement(xml_parent, 'com.waytta.SaltAPIBuilder')
    supported_auth_types = ['pam', 'ldap']
    supported_target_types = ['glob', 'pcre', 'list', 'grain', 'pillar',
                              'nodegroup', 'range', 'compound']
    supported_client_interfaces = ['local', 'local-batch', 'runner']
    # (yaml key, xml tag, default[, allowed values]); a None default makes
    # the key required.
    mapping = [
        ('servername', 'servername', None),
        ('credentials', 'credentialsId', None),
        ('authtype', 'authtype', 'pam', supported_auth_types),
        ('target', 'target', ''),
        ('targettype', 'targettype', 'glob', supported_target_types),
        ('clientinterface', 'clientInterface', 'local',
         supported_client_interfaces),
        ('function', 'function', ''),
        ('arguments', 'arguments', ''),
        ('kwarguments', 'kwarguments', ''),
        ('setpillardata', 'usePillar', False),
        ('pillarkey', 'pillarkey', ''),
        ('pillarvalue', 'pillarvalue', ''),
        ('wait', 'blockbuild', False),
        ('polltime', 'jobPollTime', ''),
        ('batchsize', 'batchSize', '100%'),
        ('mods', 'mods', ''),
        ('saveoutput', 'saveEnvVar', False)
    ]
    helpers.convert_mapping_to_xml(saltstack, data, mapping,
                                   fail_required=True)
    # Re-read the raw values: the nested clientInterfaces structure below
    # duplicates some of the top-level elements, but needs them as
    # lower-cased strings.
    clientInterface = data.get('clientinterface', 'local')
    blockbuild = str(data.get('wait', False)).lower()
    jobPollTime = str(data.get('polltime', ''))
    batchSize = data.get('batchsize', '100%')
    mods = data.get('mods', '')
    usePillar = str(data.get('setpillardata', False)).lower()
    # Build the clientInterfaces structure, based on the
    # clientinterface setting.
    # The plugin serializes this as a commons-collections ListOrderedMap:
    # <default><insertOrder> lists the keys in insertion order, and <map>
    # holds the matching <entry> pairs.
    clientInterfaces = XML.SubElement(saltstack, 'clientInterfaces')
    XML.SubElement(clientInterfaces, 'nullObject').text = 'false'
    ci_attrib = {
        'class': 'org.apache.commons.collections.map.ListOrderedMap',
        'serialization': 'custom'
    }
    properties = XML.SubElement(clientInterfaces, 'properties', ci_attrib)
    lomElement = 'org.apache.commons.collections.map.ListOrderedMap'
    listOrderedMap = XML.SubElement(properties, lomElement)
    default = XML.SubElement(listOrderedMap, 'default')
    ordered_map = XML.SubElement(listOrderedMap, 'map')
    insertOrder = XML.SubElement(default, 'insertOrder')
    # Which key/value pairs go into the map depends on the selected client
    # interface.
    ci_config = []
    if clientInterface == 'local':
        ci_config = [
            ('blockbuild', blockbuild),
            ('jobPollTime', jobPollTime),
            ('clientInterface', clientInterface)
        ]
    elif clientInterface == 'local-batch':
        ci_config = [
            ('batchSize', batchSize),
            ('clientInterface', clientInterface)
        ]
    elif clientInterface == 'runner':
        ci_config = [
            ('mods', mods),
            ('clientInterface', clientInterface)
        ]
    if usePillar == 'true':
        ci_config.append(('usePillar', usePillar))
        pillar_cfg = [
            ('pillarkey', data.get('pillarkey')),
            ('pillarvalue', data.get('pillarvalue'))
        ]
    for emt, value in ci_config:
        XML.SubElement(insertOrder, 'string').text = emt
        entry = XML.SubElement(ordered_map, 'entry')
        XML.SubElement(entry, 'string').text = emt
        # Special handling when usePillar == true, requires additional
        # structure in the builder XML
        if emt != 'usePillar':
            XML.SubElement(entry, 'string').text = value
        else:
            # The usePillar entry nests a net.sf.json.JSONObject carrying
            # its own ListOrderedMap with the pillar key/value pair
            # (pillar_cfg is only defined when usePillar == 'true', which
            # is the only way to reach this branch).
            jsonobj = XML.SubElement(entry, 'net.sf.json.JSONObject')
            XML.SubElement(jsonobj, 'nullObject').text = 'false'
            pillarProps = XML.SubElement(jsonobj, 'properties', ci_attrib)
            XML.SubElement(pillarProps, 'unserializable-parents')
            pillarLom = XML.SubElement(pillarProps, lomElement)
            pillarDefault = XML.SubElement(pillarLom, 'default')
            pillarMap = XML.SubElement(pillarLom, 'map')
            pillarInsertOrder = XML.SubElement(pillarDefault, 'insertOrder')
            for pemt, value in pillar_cfg:
                XML.SubElement(pillarInsertOrder, 'string').text = pemt
                pillarEntry = XML.SubElement(pillarMap, 'entry')
                XML.SubElement(pillarEntry, 'string').text = pemt
                XML.SubElement(pillarEntry, 'string').text = value
class Builders(jenkins_jobs.modules.base.Base):
    """Module emitting the <prebuilders>/<builders>/<postbuilders>
    sections of a job's config.xml."""

    sequence = 60

    component_type = 'builder'
    component_list_type = 'builders'

    def gen_xml(self, xml_parent, data):
        for section in ('prebuilders', 'builders', 'postbuilders'):
            if section not in data:
                continue
            section_node = XML.SubElement(xml_parent, section)
            for component in data[section]:
                self.registry.dispatch('builder', section_node, component)

        # Make sure freestyle projects always have a <builders> entry
        # or Jenkins v1.472 (at least) will NPE.
        kind = data.get('project-type', 'freestyle')
        if kind in ('freestyle', 'matrix') and 'builders' not in data:
            XML.SubElement(xml_parent, 'builders')
def shining_panda(registry, xml_parent, data):
    """yaml: shining-panda
    Execute a command inside various python environments. Requires the Jenkins
    :jenkins-wiki:`ShiningPanda plugin <ShiningPanda+Plugin>`.
    :arg str build-environment: Building environment to set up (required).
        :build-environment values:
            * **python**: Use a python installation configured in Jenkins.
            * **custom**: Use a manually installed python.
            * **virtualenv**: Create a virtualenv
    For the **python** environment
    :arg str python-version: Name of the python installation to use.
        Must match one of the configured installations on server
        configuration (default 'System-CPython-2.7')
    For the **custom** environment:
    :arg str home: path to the home folder of the custom installation
        (required)
    For the **virtualenv** environment:
    :arg str python-version: Name of the python installation to use.
        Must match one of the configured installations on server
        configuration (default 'System-CPython-2.7')
    :arg str name: Name of this virtualenv. Two virtualenv builders with
        the same name will use the same virtualenv installation (optional)
    :arg bool clear: If true, delete and recreate virtualenv on each build.
        (default false)
    :arg bool use-distribute: if true use distribute, if false use
        setuptools. (default false)
    :arg bool system-site-packages: if true, give access to the global
        site-packages directory to the virtualenv. (default false)
    Common to all environments:
    :arg str nature: Nature of the command field. (default shell)
        :nature values:
            * **shell**: execute the Command contents with default shell
            * **xshell**: like **shell** but performs platform conversion
              first
            * **python**: execute the Command contents with the Python
              executable
    :arg str command: The command to execute
    :arg bool ignore-exit-code: mark the build as failure if any of the
        commands exits with a non-zero exit code. (default false)
    Examples:
    .. literalinclude::
        /../../tests/builders/fixtures/shining-panda-pythonenv.yaml
       :language: yaml
    .. literalinclude::
        /../../tests/builders/fixtures/shining-panda-customenv.yaml
       :language: yaml
    .. literalinclude::
        /../../tests/builders/fixtures/shining-panda-virtualenv.yaml
       :language: yaml
    """
    pluginelementpart = 'jenkins.plugins.shiningpanda.builders.'
    buildenvdict = {'custom': 'CustomPythonBuilder',
                    'virtualenv': 'VirtualenvBuilder',
                    'python': 'PythonBuilder'}
    envs = buildenvdict.keys()
    try:
        buildenv = data['build-environment']
    except KeyError:
        raise MissingAttributeError('build-environment')
    if buildenv not in envs:
        raise InvalidAttributeError('build-environment', buildenv, envs)
    t = XML.SubElement(xml_parent, '%s%s' %
                       (pluginelementpart, buildenvdict[buildenv]))
    if buildenv in ('python', 'virtualenv'):
        XML.SubElement(t, 'pythonName').text = data.get("python-version",
                                                        "System-CPython-2.7")
    # Bug fix: the original tests used ``buildenv in ('custom')``, which is
    # a *substring* check against the string 'custom' (parentheses without a
    # comma do not make a tuple); use equality to express the intent.
    if buildenv == 'custom':
        try:
            homevalue = data["home"]
        except KeyError:
            raise JenkinsJobsException("'home' argument is required for the"
                                       " 'custom' environment")
        XML.SubElement(t, 'home').text = homevalue
    if buildenv == 'virtualenv':
        XML.SubElement(t, 'home').text = data.get("name", "")
        clear = data.get("clear", False)
        XML.SubElement(t, 'clear').text = str(clear).lower()
        use_distribute = data.get('use-distribute', False)
        XML.SubElement(t, 'useDistribute').text = str(use_distribute).lower()
        system_site_packages = data.get('system-site-packages', False)
        XML.SubElement(t, 'systemSitePackages').text = str(
            system_site_packages).lower()
    # Common arguments
    nature = data.get('nature', 'shell')
    naturetuple = ('shell', 'xshell', 'python')
    if nature not in naturetuple:
        raise InvalidAttributeError('nature', nature, naturetuple)
    XML.SubElement(t, 'nature').text = nature
    XML.SubElement(t, 'command').text = data.get("command", "")
    ignore_exit_code = data.get('ignore-exit-code', False)
    XML.SubElement(t, 'ignoreExitCode').text = str(ignore_exit_code).lower()
def tox(registry, xml_parent, data):
    """yaml: tox
    Use tox to build a multi-configuration project. Requires the Jenkins
    :jenkins-wiki:`ShiningPanda plugin <ShiningPanda+Plugin>`.
    :arg str ini: The TOX configuration file path (default tox.ini)
    :arg bool recreate: If true, create a new environment each time (default
        false)
    :arg str toxenv-pattern: The pattern used to build the TOXENV environment
        variable. (optional)
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/tox001.yaml
       :language: yaml
    """
    node = XML.SubElement(
        xml_parent, 'jenkins.plugins.shiningpanda.builders.ToxBuilder')
    convert_mapping_to_xml(node, data, [
        ('ini', 'toxIni', 'tox.ini'),
        ('recreate', 'recreate', False),
    ], fail_required=True)
    # <toxenvPattern> is only written when a pattern was configured.
    toxenv_pattern = data.get('toxenv-pattern')
    if toxenv_pattern:
        XML.SubElement(node, 'toxenvPattern').text = toxenv_pattern
def managed_script(registry, xml_parent, data):
    """yaml: managed-script
    This step allows to reference and execute a centrally managed
    script within your build. Requires the Jenkins
    :jenkins-wiki:`Managed Script Plugin <Managed+Script+Plugin>`.
    :arg str script-id: Id of script to execute (required)
    :arg str type: Type of managed file (default script)
        :type values:
            * **batch**: Execute managed windows batch
            * **script**: Execute managed script
    :arg list args: Arguments to be passed to referenced script
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/managed-script.yaml
       :language: yaml
    .. literalinclude:: /../../tests/builders/fixtures/managed-winbatch.yaml
       :language: yaml
    """
    # Map the yaml 'type' to the plugin class suffix and the tag that
    # carries the script identifier.
    kind_map = {
        'script': ('ScriptBuildStep', 'buildStepId'),
        'batch': ('WinBatchBuildStep', 'command'),
    }
    kind = data.get('type', 'script').lower()
    if kind not in kind_map:
        raise InvalidAttributeError('type', kind, ['script', 'batch'])
    step, script_tag = kind_map[kind]
    ms = XML.SubElement(xml_parent,
                        'org.jenkinsci.plugins.managedscripts.' + step)
    try:
        script_id = data['script-id']
    except KeyError:
        raise MissingAttributeError('script-id')
    XML.SubElement(ms, script_tag).text = script_id
    args = XML.SubElement(ms, 'buildStepArgs')
    for arg in data.get('args', []):
        XML.SubElement(args, 'string').text = arg
def cmake(registry, xml_parent, data):
    """yaml: cmake
    Execute a CMake target. Requires the Jenkins :jenkins-wiki:`CMake Plugin
    <CMake+Plugin>`.
    This builder is compatible with both versions 2.x and 1.x of the
    plugin. When specifying paramenters from both versions only the ones from
    the installed version in Jenkins will be used, and the rest will be
    ignored.
    :arg str source-dir: the source code directory relative to the workspace
        directory. (required)
    :arg str build-type: Sets the "build type" option for CMake (default
        "Debug").
    :arg str preload-script: Path to a CMake preload script file. (optional)
    :arg str other-arguments: Other arguments to be added to the CMake
        call. (optional)
    :arg bool clean-build-dir: If true, delete the build directory before each
        build (default false).
    :arg list generator: The makefile generator (default "Unix Makefiles").
        :type Possible generators:
            * **Borland Makefiles**
            * **CodeBlocks - MinGW Makefiles**
            * **CodeBlocks - Unix Makefiles**
            * **Eclipse CDT4 - MinGW Makefiles**
            * **Eclipse CDT4 - NMake Makefiles**
            * **Eclipse CDT4 - Unix Makefiles**
            * **MSYS Makefiles**
            * **MinGW Makefiles**
            * **NMake Makefiles**
            * **Unix Makefiles**
            * **Visual Studio 6**
            * **Visual Studio 7 .NET 2003**
            * **Visual Studio 8 2005**
            * **Visual Studio 8 2005 Win64**
            * **Visual Studio 9 2008**
            * **Visual Studio 9 2008 Win64**
            * **Watcom WMake**
    :Version 2.x: Parameters that available only to versions 2.x of the plugin
        * **working-dir** (`str`): The directory where the project will be
          built in. Relative to the workspace directory. (optional)
        * **installation-name** (`str`): The CMake installation to be used on
          this builder. Use one defined in your Jenkins global configuration
          page (default "InSearchPath").
        * **build-tool-invocations** (`list`): list of build tool invocations
          that will happen during the build:
            :Build tool invocations:
                * **use-cmake** (`str`) -- Whether to run the actual build tool
                  directly (by expanding ``$CMAKE_BUILD_TOOL``) or to have
                  cmake run the build tool (by invoking ``cmake --build
                  <dir>``) (default false).
                * **arguments** (`str`) -- Specify arguments to pass to the
                  build tool or cmake (separated by spaces). Arguments may
                  contain spaces if they are enclosed in double
                  quotes. (optional)
                * **environment-variables** (`str`) -- Specify extra
                  environment variables to pass to the build tool as
                  key-value pairs here. Each entry must be on its own line,
                  for example:
                  ``DESTDIR=${WORKSPACE}/artifacts/dir``
                  ``KEY=VALUE``
    :Version 1.x: Parameters available only to versions 1.x of the plugin
        * **build-dir** (`str`): The directory where the project will be built
          in. Relative to the workspace directory. (optional)
        * **install-dir** (`str`): The directory where the project will be
          installed in, relative to the workspace directory. (optional)
        * **build-type** (`list`): Sets the "build type" option. A custom type
          different than the default ones specified on the CMake plugin can
          also be set, which will be automatically used in the "Other Build
          Type" option of the plugin. (default "Debug")
            :Default types present in the CMake plugin:
                * **Debug**
                * **Release**
                * **RelWithDebInfo**
                * **MinSizeRel**
        * **make-command** (`str`): The make command (default "make").
        * **install-command** (`arg`): The install command (default "make
          install").
        * **custom-cmake-path** (`str`): Path to cmake executable. (optional)
        * **clean-install-dir** (`bool`): If true, delete the install dir
          before each build (default false).
    Example (Versions 2.x):
    .. literalinclude::
        ../../tests/builders/fixtures/cmake/version-2.0/complete-2.x.yaml
       :language: yaml
    Example (Versions 1.x):
    .. literalinclude::
        ../../tests/builders/fixtures/cmake/version-1.10/complete-1.x.yaml
       :language: yaml
    """
    # Build types the 1.x plugin understands natively; anything else is
    # routed through <otherBuildType> (see the comment in the else branch).
    BUILD_TYPES = ['Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel']
    cmake = XML.SubElement(xml_parent, 'hudson.plugins.cmake.CmakeBuilder')
    source_dir = XML.SubElement(cmake, 'sourceDir')
    try:
        source_dir.text = data['source-dir']
    except KeyError:
        raise MissingAttributeError('source-dir')
    XML.SubElement(cmake, 'generator').text = str(
        data.get('generator', "Unix Makefiles"))
    XML.SubElement(cmake, 'cleanBuild').text = str(
        data.get('clean-build-dir', False)).lower()
    # Ask the registry which plugin version is installed; fall back to
    # "1.0" (i.e. the 1.x layout) when the version cannot be determined.
    plugin_info = registry.get_plugin_info("CMake plugin")
    version = pkg_resources.parse_version(plugin_info.get("version", "1.0"))
    # Version 2.x breaks compatibility. So parse the input data differently
    # based on it:
    if version >= pkg_resources.parse_version("2.0"):
        # Unlike 1.x below, 2.x only emits <preloadScript> when one was
        # actually configured.
        if data.get('preload-script'):
            XML.SubElement(cmake, 'preloadScript').text = str(
                data.get('preload-script', ''))
        XML.SubElement(cmake, 'workingDir').text = str(
            data.get('working-dir', ''))
        XML.SubElement(cmake, 'buildType').text = str(
            data.get('build-type', 'Debug'))
        XML.SubElement(cmake, 'installationName').text = str(
            data.get('installation-name', 'InSearchPath'))
        XML.SubElement(cmake, 'toolArgs').text = str(
            data.get('other-arguments', ''))
        tool_steps = XML.SubElement(cmake, 'toolSteps')
        # One <hudson.plugins.cmake.BuildToolStep> per configured
        # build-tool invocation.
        for step_data in data.get('build-tool-invocations', []):
            tagname = 'hudson.plugins.cmake.BuildToolStep'
            step = XML.SubElement(tool_steps, tagname)
            XML.SubElement(step, 'withCmake').text = str(
                step_data.get('use-cmake', False)).lower()
            XML.SubElement(step, 'args').text = str(
                step_data.get('arguments', ''))
            XML.SubElement(step, 'vars').text = str(
                step_data.get('environment-variables', ''))
    else:
        XML.SubElement(cmake, 'preloadScript').text = str(
            data.get('preload-script', ''))
        build_dir = XML.SubElement(cmake, 'buildDir')
        build_dir.text = data.get('build-dir', '')
        install_dir = XML.SubElement(cmake, 'installDir')
        install_dir.text = data.get('install-dir', '')
        # The options buildType and otherBuildType work together on the CMake
        # plugin:
        # * If the passed value is one of the predefined values, set buildType
        #   to it and otherBuildType to blank;
        # * Otherwise, set otherBuildType to the value, and buildType to
        #   "Debug". The CMake plugin will ignore the buildType option.
        #
        # It is strange and confusing that the plugin author chose to do
        # something like that instead of simply passing a string "buildType"
        # option, so this was done to simplify it for the JJB user.
        build_type = XML.SubElement(cmake, 'buildType')
        build_type.text = data.get('build-type', BUILD_TYPES[0])
        other_build_type = XML.SubElement(cmake, 'otherBuildType')
        if(build_type.text not in BUILD_TYPES):
            other_build_type.text = build_type.text
            build_type.text = BUILD_TYPES[0]
        else:
            other_build_type.text = ''
        make_command = XML.SubElement(cmake, 'makeCommand')
        make_command.text = data.get('make-command', 'make')
        install_command = XML.SubElement(cmake, 'installCommand')
        install_command.text = data.get('install-command', 'make install')
        other_cmake_args = XML.SubElement(cmake, 'cmakeArgs')
        other_cmake_args.text = data.get('other-arguments', '')
        custom_cmake_path = XML.SubElement(cmake, 'projectCmakePath')
        custom_cmake_path.text = data.get('custom-cmake-path', '')
        clean_install_dir = XML.SubElement(cmake, 'cleanInstallDir')
        clean_install_dir.text = str(data.get('clean-install-dir',
                                              False)).lower()
    # The plugin generates this tag, but there doesn't seem to be anything
    # that can be configurable by it. Let's keep it to maintain
    # compatibility:
    XML.SubElement(cmake, 'builderImpl')
def dsl(registry, xml_parent, data):
    """yaml: dsl
    Process Job DSL
    Requires the Jenkins :jenkins-wiki:`Job DSL plugin <Job+DSL+Plugin>`.
    :arg str script-text: dsl script which is Groovy code (Required if targets
        is not specified)
    :arg str targets: Newline separated list of DSL scripts, located in the
        Workspace. Can use wildcards like 'jobs/\*/\*/\*.groovy' (Required
        if script-text is not specified)
    :arg str ignore-existing: Ignore previously generated jobs and views
    :arg str removed-job-action: Specifies what to do when a previously
        generated job is not referenced anymore, can be 'IGNORE', 'DISABLE',
        or 'DELETE' (default 'IGNORE')
    :arg str removed-view-action: Specifies what to do when a previously
        generated view is not referenced anymore, can be 'IGNORE' or 'DELETE'.
        (default 'IGNORE')
    :arg str lookup-strategy: Determines how relative job names in DSL
        scripts are interpreted, can be 'JENKINS_ROOT' or 'SEED_JOB'.
        (default 'JENKINS_ROOT')
    :arg str additional-classpath: Newline separated list of additional
        classpath entries for the Job DSL scripts. All entries must be
        relative to the workspace root, e.g. build/classes/main. (optional)
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/dsl001.yaml
       :language: yaml
    .. literalinclude:: /../../tests/builders/fixtures/dsl002.yaml
       :language: yaml
    """
    dsl = XML.SubElement(xml_parent,
                         'javaposse.jobdsl.plugin.ExecuteDslScripts')

    # Migrate the deprecated 'target' key to 'targets', warning either way.
    if 'target' in data:
        if 'targets' not in data:
            logger.warning("Converting from old format of 'target' to new "
                           "name 'targets', please update your job "
                           "definitions.")
            data['targets'] = data['target']
        else:
            logger.warning("Ignoring old argument 'target' in favour of new "
                           "format argument 'targets', please remove old "
                           "format.")

    # Exactly one of script-text / targets must be supplied; the
    # usingScriptText flag records which one was used.
    script_text = data.get('script-text')
    targets = data.get('targets')
    if script_text:
        XML.SubElement(dsl, 'scriptText').text = script_text
        XML.SubElement(dsl, 'usingScriptText').text = 'true'
    elif targets:
        XML.SubElement(dsl, 'targets').text = targets
        XML.SubElement(dsl, 'usingScriptText').text = 'false'
    else:
        raise MissingAttributeError(['script-text', 'target'])

    XML.SubElement(dsl, 'ignoreExisting').text = str(
        data.get('ignore-existing', False)).lower()

    # (yaml key, xml tag, allowed values); the first allowed value is the
    # default, and anything outside the list is rejected.
    choices = [
        ('removed-job-action', 'removedJobAction',
         ['IGNORE', 'DISABLE', 'DELETE']),
        ('removed-view-action', 'removedViewAction',
         ['IGNORE', 'DELETE']),
        ('lookup-strategy', 'lookupStrategy',
         ['JENKINS_ROOT', 'SEED_JOB']),
    ]
    for attr, tag, allowed in choices:
        value = data.get(attr, allowed[0])
        if value not in allowed:
            raise InvalidAttributeError(attr, value, allowed)
        XML.SubElement(dsl, tag).text = value

    XML.SubElement(dsl, 'additionalClasspath').text = data.get(
        'additional-classpath')
def github_notifier(registry, xml_parent, data):
    """yaml: github-notifier
    Set pending build status on Github commit.
    Requires the Jenkins :jenkins-wiki:`Github Plugin <GitHub+Plugin>`.
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/github-notifier.yaml
       :language: yaml
    """
    # The step takes no options; an empty marker element is all the
    # plugin needs.
    tag = 'com.cloudbees.jenkins.GitHubSetCommitStatusBuilder'
    XML.SubElement(xml_parent, tag)
def scan_build(registry, xml_parent, data):
    """yaml: scan-build
    This plugin allows you configure a build step that will execute the Clang
    scan-build static analysis tool against an XCode project.
    The scan-build report has to be generated in the directory
    ``${WORKSPACE}/clangScanBuildReports`` for the publisher to find it.
    Requires the Jenkins :jenkins-wiki:`Clang Scan-Build Plugin
    <Clang+Scan-Build+Plugin>`.
    :arg str target: Provide the exact name of the XCode target you wish to
        have compiled and analyzed (required)
    :arg str target-sdk: Set the simulator version of a currently installed SDK
        (default iphonesimulator)
    :arg str config: Provide the XCode config you wish to execute scan-build
        against (default Debug)
    :arg str clang-install-name: Name of clang static analyzer to use (default
        '')
    :arg str xcode-sub-path: Path of XCode project relative to the workspace
        (default 'myProj/subfolder')
    :arg str workspace: Name of workspace (default '')
    :arg str scheme: Name of scheme (default '')
    :arg str scan-build-args: Additional arguments to clang scan-build
        (default --use-analyzer Xcode)
    :arg str xcode-build-args: Additional arguments to XCode (default
        -derivedDataPath $WORKSPACE/build)
    :arg str report-folder: Folder where generated reports are located
        (>=1.7) (default clangScanBuildReports)
    Full Example:
    .. literalinclude:: /../../tests/builders/fixtures/scan-build-full.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
        /../../tests/builders/fixtures/scan-build-minimal.yaml
       :language: yaml
    """
    p = XML.SubElement(
        xml_parent,
        'jenkins.plugins.clangscanbuild.ClangScanBuildBuilder')
    p.set('plugin', 'clang-scanbuild')
    # (yaml key, xml tag, default); 'target' is required -- its None
    # default makes convert_mapping_to_xml raise when the key is absent.
    mappings = [
        ('target', 'target', None),
        ('target-sdk', 'targetSdk', 'iphonesimulator'),
        ('config', 'config', 'Debug'),
        ('clang-install-name', 'clangInstallationName', ''),
        ('xcode-sub-path', 'xcodeProjectSubPath', 'myProj/subfolder'),
        ('workspace', 'workspace', ''),
        ('scheme', 'scheme', ''),
        ('scan-build-args', 'scanbuildargs', '--use-analyzer Xcode'),
        ('xcode-build-args',
         'xcodebuildargs',
         '-derivedDataPath $WORKSPACE/build'),
        ('report-folder', 'outputFolderName', 'clangScanBuildReports'),
    ]
    convert_mapping_to_xml(p, data, mappings, fail_required=True)
def ssh_builder(registry, xml_parent, data):
    """yaml: ssh-builder
    Executes command on remote host
    Requires the Jenkins :jenkins-wiki:`SSH plugin <SSH+plugin>`.
    :arg str ssh-user-ip: user@ip:ssh_port of machine that was defined
        in jenkins according to SSH plugin instructions
    :arg str command: command to run on remote server
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/ssh-builder.yaml
       :language: yaml
    """
    node = XML.SubElement(xml_parent, 'org.jvnet.hudson.plugins.SSHBuilder')
    # Both options are mandatory: a None default makes the helper raise
    # when the key is missing.
    convert_mapping_to_xml(node, data, [
        ('ssh-user-ip', 'siteName', None),
        ('command', 'command', None),
    ], fail_required=True)
def sonar(registry, xml_parent, data):
    """yaml: sonar
    Invoke standalone Sonar analysis.
    Requires the Jenkins `Sonar Plugin.
    <http://docs.sonarqube.org/display/SCAN/\
Analyzing+with+SonarQube+Scanner+for+Jenkins\
#AnalyzingwithSonarQubeScannerforJenkins-\
AnalyzingwiththeSonarQubeScanner>`_
    :arg str sonar-name: Name of the Sonar installation. (required)
    :arg str task: Task to run. (default '')
    :arg str project: Path to Sonar project properties file. (default '')
    :arg str properties: Sonar configuration properties. (default '')
    :arg str java-opts: Java options for Sonar Runner. (default '')
    :arg str additional-arguments: additional command line arguments
        (default '')
    :arg str jdk: JDK to use (inherited from the job if omitted). (optional)
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/sonar.yaml
       :language: yaml
    """
    sonar = XML.SubElement(xml_parent,
                           'hudson.plugins.sonar.SonarRunnerBuilder')
    sonar.set('plugin', 'sonar')
    # Raise the standard JJB error for the required key, consistent with
    # the other builders in this module, instead of leaking a bare
    # KeyError to the user.
    try:
        XML.SubElement(sonar, 'installationName').text = data['sonar-name']
    except KeyError:
        raise MissingAttributeError('sonar-name')
    mappings = [
        ('task', 'task', ''),
        ('project', 'project', ''),
        ('properties', 'properties', ''),
        ('java-opts', 'javaOpts', ''),
        ('additional-arguments', 'additionalArguments', ''),
    ]
    convert_mapping_to_xml(sonar, data, mappings, fail_required=True)
    # <jdk> is only emitted when explicitly configured; otherwise the
    # job-level JDK applies.
    if 'jdk' in data:
        XML.SubElement(sonar, 'jdk').text = data['jdk']
def xcode(registry, xml_parent, data):
    """yaml: xcode
    This step allows to execute an xcode build step. Requires the Jenkins
    :jenkins-wiki:`Xcode Plugin <Xcode+Plugin>`.
    :arg str developer-profile: the jenkins credential id for a
        ios developer profile. (optional)
    :arg bool clean-build: if true will delete the build directories
        before invoking the build. (default false)
    :arg bool clean-test-reports: UNKNOWN. (default false)
    :arg bool archive: if true will generate an xcarchive of the specified
        scheme. A workspace and scheme are also needed for archives.
        (default false)
    :arg str configuration: This is the name of the configuration
        as defined in the Xcode project. (default 'Release')
    :arg str configuration-directory: The value to use for
        CONFIGURATION_BUILD_DIR setting. (default '')
    :arg str target: Leave empty for all targets. (default '')
    :arg str sdk: Leave empty for default SDK. (default '')
    :arg str symroot: Leave empty for default SYMROOT. (default '')
    :arg str project-path: Relative path within the workspace
        that contains the xcode project file(s). (default '')
    :arg str project-file: Only needed if there is more than one
        project file in the Xcode Project Directory. (default '')
    :arg str build-arguments: Extra commandline arguments provided
        to the xcode builder. (default '')
    :arg str schema: Only needed if you want to compile for a
        specific schema instead of a target. (default '')
    :arg str workspace: Only needed if you want to compile a
        workspace instead of a project. (default '')
    :arg str profile: The relative path to the mobileprovision to embed,
        leave blank for no embedded profile. (default '')
    :arg str codesign-id: Override the code signing identity specified
        in the project. (default '')
    :arg bool allow-failing: if true will prevent this build step from
        failing if xcodebuild exits with a non-zero return code. (default
        false)
    :arg str version-technical: The value to use for CFBundleVersion.
        Leave blank to use project's technical number. (default '')
    :arg str version-marketing: The value to use for
        CFBundleShortVersionString. Leave blank to use project's
        marketing number. (default '')
    :arg str ipa-version: A pattern for the ipa file name. You may use
        ${VERSION} and ${BUILD_DATE} (yyyy.MM.dd) in this string.
        (default '')
    :arg str ipa-output: The output directory for the .ipa file,
        relative to the build directory. (default '')
    :arg str keychain-name: The globally configured keychain to unlock for
        this build. (default '')
    :arg str keychain-path: The path of the keychain to use to sign the IPA.
        (default '')
    :arg str keychain-password: The password to use to unlock the keychain.
        (default '')
    :arg bool keychain-unlock: Unlocks the keychain during use.
        (default false)
    Example:
    .. literalinclude:: /../../tests/builders/fixtures/xcode.yaml
       :language: yaml
    """
    # The developer profile loader is a separate builder element that must
    # precede the XCodeBuilder element when a profile is configured.
    if data.get('developer-profile'):
        profile = XML.SubElement(xml_parent, 'au.com.rayh.'
                                             'DeveloperProfileLoader')
        mapping = [('developer-profile', 'id', None)]
        convert_mapping_to_xml(profile, data, mapping, fail_required=False)
    xcode = XML.SubElement(xml_parent, 'au.com.rayh.XCodeBuilder')
    mappings = [
        ('clean-build', 'cleanBeforeBuild', False),
        ('clean-test-reports', 'cleanTestReports', False),
        ('archive', 'generateArchive', False),
        ('configuration', 'configuration', 'Release'),
        ('configuration-directory', 'configurationBuildDir', ''),
        ('target', 'target', ''),
        ('sdk', 'sdk', ''),
        ('symroot', 'symRoot', ''),
        ('project-path', 'xcodeProjectPath', ''),
        ('project-file', 'xcodeProjectFile', ''),
        ('build-arguments', 'xcodebuildArguments', ''),
        ('schema', 'xcodeSchema', ''),
        ('workspace', 'xcodeWorkspaceFile', ''),
        ('profile', 'embeddedProfileFile', ''),
        ('codesign-id', 'codeSigningIdentity', ''),
        ('allow-failing', 'allowFailingBuildResults', False),
    ]
    convert_mapping_to_xml(xcode, data, mappings, fail_required=True)
    # Version elements are always emitted; their text depends on whether
    # either override was supplied.
    version = XML.SubElement(xcode, 'provideApplicationVersion')
    version_technical = XML.SubElement(xcode,
                                       'cfBundleVersionValue')
    version_marketing = XML.SubElement(xcode,
                                       'cfBundleShortVersionStringValue')
    if data.get('version-technical') or data.get('version-marketing'):
        version.text = 'true'
        version_technical.text = data.get('version-technical', '')
        version_marketing.text = data.get('version-marketing', '')
    else:
        version.text = 'false'
    # Build an IPA only when an ipa-version pattern was supplied.
    # bool() already yields False for a missing/empty value, so the former
    # "... or False" was redundant and has been dropped.
    XML.SubElement(xcode, 'buildIpa').text = str(
        bool(data.get('ipa-version'))).lower()
    mapping = [
        ('ipa-version', 'ipaName', ''),
        ('ipa-output', 'ipaOutputDirectory', ''),
        ('keychain-name', 'keychainName', ''),
        ('keychain-path', 'keychainPath', ''),
        ('keychain-password', 'keychainPwd', ''),
        ('keychain-unlock', 'unlockKeychain', False),
    ]
    convert_mapping_to_xml(xcode, data, mapping, fail_required=True)
def sonatype_clm(registry, xml_parent, data):
    """yaml: sonatype-clm
    Scan the build workspace and evaluate it against a Sonatype CLM policy.
    Requires the Jenkins :jenkins-wiki:`Sonatype CLM Plugin
    <Sonatype+CLM+%28formerly+Insight+for+CI%29>`.
    :arg str value: Select CLM application from a list of available CLM
        applications or specify CLM Application ID (default list)
    :arg str application-name: Determines the policy elements to associate
        with this build. (required)
    :arg str username: Username on the Sonatype CLM server. Leave empty to
        use the username configured at global level. (default '')
    :arg str password: Password on the Sonatype CLM server. Leave empty to
        use the password configured at global level. (default '')
    :arg bool fail-on-clm-server-failure: Controls the build outcome if there
        is a failure in communicating with the CLM server. (default false)
    :arg str stage: Controls the stage the policy evaluation will be run
        against on the CLM server. Valid stages: build, stage-release,
        release, operate. (default 'build')
    :arg str scan-targets: Pattern of files to include for scanning.
        (default '')
    :arg str module-excludes: Pattern of files to exclude. (default '')
    :arg str advanced-options: Options to be set on a case-by-case basis as
        advised by Sonatype Support. (default '')
    Minimal Example:
    .. literalinclude::
       /../../tests/builders/fixtures/sonatype-clm-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude::
       /../../tests/builders/fixtures/sonatype-clm-full.yaml
       :language: yaml
    """
    valid_values = ['list', 'manual']
    valid_stages = ['build', 'stage-release', 'release', 'operate']
    clm = XML.SubElement(
        xml_parent, 'com.sonatype.insight.ci.hudson.PreBuildScan')
    clm.set('plugin', 'sonatype-clm-ci')
    # How the CLM application is selected (from a list or by manual id).
    app_select = XML.SubElement(clm, 'applicationSelectType')
    convert_mapping_to_xml(
        app_select, data,
        [('value', 'value', 'list', valid_values),
         ('application-name', 'applicationId', None)],
        fail_required=True)
    # Scan path configuration.
    path_cfg = XML.SubElement(clm, 'pathConfig')
    convert_mapping_to_xml(
        path_cfg, data,
        [('scan-targets', 'scanTargets', ''),
         ('module-excludes', 'moduleExcludes', ''),
         ('advanced-options', 'scanProperties', '')],
        fail_required=True)
    # Top-level scan options.
    convert_mapping_to_xml(
        clm, data,
        [('fail-on-clm-server-failure', 'failOnClmServerFailures', False),
         ('stage', 'stageId', 'build', valid_stages),
         ('username', 'username', ''),
         ('password', 'password', '')],
        fail_required=True)
def beaker(registry, xml_parent, data):
    """yaml: beaker
    Execute a beaker build step. Requires the Jenkins :jenkins-wiki:`Beaker
    Builder Plugin <Beaker+Builder+Plugin>`.
    :arg str content: Run job from string
        (Alternative: you can choose a path instead)
    :arg str path: Run job from file
        (Alternative: you can choose a content instead)
    :arg bool download-logs: Download Beaker log files (default false)
    Example:
    .. literalinclude:: ../../tests/builders/fixtures/beaker-path.yaml
       :language: yaml
    .. literalinclude:: ../../tests/builders/fixtures/beaker-content.yaml
       :language: yaml
    """
    beaker = XML.SubElement(xml_parent, 'org.jenkinsci.plugins.beakerbuilder.'
                                        'BeakerBuilder')
    job_source = XML.SubElement(beaker, 'jobSource')
    has_content = 'content' in data
    has_path = 'path' in data
    # Exactly one of 'content' / 'path' must be supplied.
    if has_content and has_path:
        raise JenkinsJobsException("Use just one of 'content' or 'path'")
    if not has_content and not has_path:
        raise JenkinsJobsException("Use one of 'content' or 'path'")
    source_prefix = 'org.jenkinsci.plugins.beakerbuilder.'
    if has_content:
        job_source.set('class', source_prefix + 'StringJobSource')
        XML.SubElement(job_source, 'jobContent').text = data['content']
    else:
        job_source.set('class', source_prefix + 'FileJobSource')
        XML.SubElement(job_source, 'jobPath').text = data['path']
    XML.SubElement(beaker, 'downloadFiles').text = str(data.get(
        'download-logs', False)).lower()
def cloudformation(registry, xml_parent, data):
    """yaml: cloudformation
    Create cloudformation stacks before running a build and optionally
    delete them at the end. Requires the Jenkins :jenkins-wiki:`AWS
    Cloudformation Plugin <AWS+Cloudformation+Plugin>`.
    :arg list name: The names of the stacks to create (required)
    :arg str description: Description of the stack (optional)
    :arg str recipe: The cloudformation recipe file (required)
    :arg list parameters: List of key/value pairs to pass
        into the recipe, will be joined together into a comma separated
        string (optional)
    :arg int timeout: Number of seconds to wait before giving up creating
        a stack (default 0)
    :arg str access-key: The Amazon API Access Key (required)
    :arg str secret-key: The Amazon API Secret Key (required)
    :arg int sleep: Number of seconds to wait before continuing to the
        next step (default 0)
    :arg array region: The region to run cloudformation in (required)
        :region values:
            * **us-east-1**
            * **us-west-1**
            * **us-west-2**
            * **eu-central-1**
            * **eu-west-1**
            * **ap-southeast-1**
            * **ap-southeast-2**
            * **ap-northeast-1**
            * **sa-east-1**
    Example:
    .. literalinclude:: ../../tests/builders/fixtures/cloudformation.yaml
       :language: yaml
    """
    regions = cloudformation_region_dict()
    stacks = cloudformation_init(xml_parent, data, 'CloudFormationBuildStep')
    # Every entry in 'data' describes one stack to create.
    for stack in data:
        cloudformation_stack(
            xml_parent, stack, 'PostBuildStackBean', stacks, regions)
def jms_messaging(registry, xml_parent, data):
    """yaml: jms-messaging
    The JMS Messaging Plugin provides the following functionality:
    - A build trigger to submit jenkins jobs upon receipt
      of a matching message.
    - A builder that may be used to submit a message to the topic
      upon the completion of a job
    - A post-build action that may be used to submit a message to the topic
      upon the completion of a job
    JMS Messaging provider types supported:
    - ActiveMQ
    - FedMsg
    Requires the Jenkins :jenkins-wiki:`JMS Messaging Plugin
    Pipeline Plugin <JMS+Messaging+Plugin>`.
    :arg str override-topic: If you need to override the default topic.
        (default '')
    :arg str provider-name: Name of message provider setup in the
        global config. (default '')
    :arg str msg-type: A message type
        (default 'CodeQualityChecksDone')
    :arg str msg-props: Message header to publish. (default '')
    :arg str msg-content: Message body to publish. (default '')
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/jms-messaging-full.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/jms-messaging-minimal.yaml
       :language: yaml
    """
    # XML generation is shared with the trigger and post-build variants
    # of this plugin; the common helper does all the work.
    helpers.jms_messaging_common(
        xml_parent, 'com.redhat.jenkins.plugins.ci.CIMessageBuilder', data)
def openshift_build_verify(registry, xml_parent, data):
    """yaml: openshift-build-verify
    Performs the equivalent of an 'oc get builds` command invocation for the
    provided buildConfig key provided; once the list of builds are obtained,
    the state of the latest build is inspected for up to a minute to see if
    it has completed successfully.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you
        want to run a Build on (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-build-verify001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-build-verify002.yaml
       :language: yaml
    """
    verifier = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftBuildVerifier')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('bld-cfg', 'bldCfg', 'frontend'),
        ('namespace', 'namespace', 'test'),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(verifier, data, mappings, fail_required=True)
def openshift_builder(registry, xml_parent, data):
    """yaml: openshift-builder
    Perform builds in OpenShift for the job.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str bld-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
        Build on (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg str commit-ID: The value here is what you supply with the
        --commit option when invoking the
        OpenShift `oc start-build` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    :arg str build-name: The value here is what you supply with the
        --from-build option when invoking the
        OpenShift `oc start-build` command. (default '')
    :arg bool show-build-logs: Indicates whether the build logs get dumped
        to the console of the Jenkins build. (default false)
    Full Example:
    .. literalinclude:: ../../tests/builders/fixtures/openshift-builder001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude:: ../../tests/builders/fixtures/openshift-builder002.yaml
       :language: yaml
    """
    osb = XML.SubElement(xml_parent,
                         'com.openshift.jenkins.plugins.pipeline.'
                         'OpenShiftBuilder')
    mapping = [
        # option, xml name, default value
        ("api-url", 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ("bld-cfg", 'bldCfg', 'frontend'),
        ("namespace", 'namespace', 'test'),
        ("auth-token", 'authToken', ''),
        ("commit-ID", 'commitID', ''),
        ("verbose", 'verbose', False),
        ("build-name", 'buildName', ''),
        ("show-build-logs", 'showBuildLogs', False),
    ]
    convert_mapping_to_xml(osb, data, mapping, fail_required=True)
def openshift_creator(registry, xml_parent, data):
    """yaml: openshift-creator
    Performs the equivalent of an oc create command invocation;
    this build step takes in the provided JSON or YAML text, and if it
    conforms to OpenShift schema, creates whichever
    OpenShift resources are specified.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str jsonyaml: The JSON or YAML formatted text that conforms to
        the schema for defining the various OpenShift resources. (default '')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-creator001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-creator002.yaml
       :language: yaml
    """
    creator = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftCreator')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('jsonyaml', 'jsonyaml', ''),
        ('namespace', 'namespace', 'test'),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(creator, data, mappings, fail_required=True)
def openshift_dep_verify(registry, xml_parent, data):
    """yaml: openshift-dep-verify
    Determines whether the expected set of DeploymentConfig's,
    ReplicationController's, and active replicas are present based on prior
    use of the scaler (2) and deployer (3) steps
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default \https://openshift.default.svc.cluster.local)
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
        Build on (default frontend)
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default test)
    :arg int replica-count: The value here should be whatever the number
        of pods you want started for the deployment. (default 0)
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-dep-verify001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-dep-verify002.yaml
       :language: yaml
    """
    verifier = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftDeploymentVerifier')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('dep-cfg', 'depCfg', 'frontend'),
        ('namespace', 'namespace', 'test'),
        ('replica-count', 'replicaCount', 0),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(verifier, data, mappings, fail_required=True)
def openshift_deployer(registry, xml_parent, data):
    """yaml: openshift-deployer
    Start a deployment in OpenShift for the job.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
        Build on (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-deployer001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-deployer002.yaml
       :language: yaml
    """
    deployer = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftDeployer')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('dep-cfg', 'depCfg', 'frontend'),
        ('namespace', 'namespace', 'test'),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(deployer, data, mappings, fail_required=True)
def openshift_img_tagger(registry, xml_parent, data):
    """yaml: openshift-img-tagger
    Performs the equivalent of an oc tag command invocation in order to
    manipulate tags for images in OpenShift ImageStream's
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str test-tag: The equivalent to the name supplied to a
        `oc get service` command line invocation.
        (default 'origin-nodejs-sample:latest')
    :arg str prod-tag: The equivalent to the name supplied to a
        `oc get service` command line invocation.
        (default 'origin-nodejs-sample:prod')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-img-tagger001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-img-tagger002.yaml
       :language: yaml
    """
    tagger = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftImageTagger')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('test-tag', 'testTag', 'origin-nodejs-sample:latest'),
        ('prod-tag', 'prodTag', 'origin-nodejs-sample:prod'),
        ('namespace', 'namespace', 'test'),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(tagger, data, mappings, fail_required=True)
def openshift_scaler(registry, xml_parent, data):
    """yaml: openshift-scaler
    Scale deployments in OpenShift for the job.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str dep-cfg: The value here should be whatever was the output
        from `oc project` when you created the BuildConfig you want to run a
        Build on (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg int replica-count: The value here should be whatever the number
        of pods you want started for the deployment. (default 0)
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude:: ../../tests/builders/fixtures/openshift-scaler001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude:: ../../tests/builders/fixtures/openshift-scaler002.yaml
       :language: yaml
    """
    scaler = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftScaler')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('dep-cfg', 'depCfg', 'frontend'),
        ('namespace', 'namespace', 'test'),
        ('replica-count', 'replicaCount', 0),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(scaler, data, mappings, fail_required=True)
def openshift_svc_verify(registry, xml_parent, data):
    """yaml: openshift-svc-verify
    Verify a service is up in OpenShift for the job.
    Requires the Jenkins :jenkins-wiki:`OpenShift
    Pipeline Plugin <OpenShift+Pipeline+Plugin>`.
    :arg str api-url: this would be the value you specify if you leverage the
        --server option on the OpenShift `oc` command.
        (default '\https://openshift.default.svc.cluster.local')
    :arg str svc-name: The equivalent to the name supplied to a
        `oc get service` command line invocation. (default 'frontend')
    :arg str namespace: If you run `oc get bc` for the project listed in
        "namespace", that is the value you want to put here. (default 'test')
    :arg str auth-token: The value here is what you supply with the --token
        option when invoking the OpenShift `oc` command. (default '')
    :arg bool verbose: This flag is the toggle for
        turning on or off detailed logging in this plug-in. (default false)
    Full Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-svc-verify001.yaml
       :language: yaml
    Minimal Example:
    .. literalinclude::
       ../../tests/builders/fixtures/openshift-svc-verify002.yaml
       :language: yaml
    """
    verifier = XML.SubElement(
        xml_parent,
        'com.openshift.jenkins.plugins.pipeline.OpenShiftServiceVerifier')
    # (yaml option, xml tag, default)
    mappings = [
        ('api-url', 'apiURL', 'https://openshift.default.svc.cluster.local'),
        ('svc-name', 'svcName', 'frontend'),
        ('namespace', 'namespace', 'test'),
        ('auth-token', 'authToken', ''),
        ('verbose', 'verbose', False),
    ]
    convert_mapping_to_xml(verifier, data, mappings, fail_required=True)
def runscope(registry, xml_parent, data):
    """yaml: runscope
    Trigger a Runscope test and wait for its result.
    Requires the Jenkins :jenkins-wiki:`Runscope Plugin <Runscope+Plugin>`.
    :arg str test-trigger-url: Trigger URL for test. (required)
    :arg str access-token: OAuth Personal Access token. (required)
    :arg int timeout: Timeout for test duration in seconds. (default 60)
    Minimal Example:
    .. literalinclude:: /../../tests/builders/fixtures/runscope-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude:: /../../tests/builders/fixtures/runscope-full.yaml
       :language: yaml
    """
    builder = XML.SubElement(
        xml_parent, 'com.runscope.jenkins.Runscope.RunscopeBuilder')
    builder.set('plugin', 'runscope')
    convert_mapping_to_xml(
        builder, data,
        [('test-trigger-url', 'triggerEndPoint', None),
         ('access-token', 'accessToken', None),
         ('timeout', 'timeout', 60)],
        fail_required=True)
def description_setter(registry, xml_parent, data):
    """yaml: description-setter
    Set the description of each build from a RegEx match against the
    build log file.
    Requires the Jenkins :jenkins-wiki:`Description Setter Plugin
    <Description+Setter+Plugin>`.
    :arg str regexp: A RegEx which is used to scan the build log file
        (default '')
    :arg str description: The description to set on the build (optional)
    Example:
    .. literalinclude::
       /../../tests/builders/fixtures/description-setter001.yaml
       :language: yaml
    """
    setter = XML.SubElement(
        xml_parent,
        'hudson.plugins.descriptionsetter.DescriptionSetterBuilder')
    XML.SubElement(setter, 'regexp').text = data.get('regexp', '')
    # 'description' is optional; the element is only emitted when present.
    if 'description' in data:
        XML.SubElement(setter, 'description').text = data['description']
def docker_build_publish(parse, xml_parent, data):
    """yaml: docker-build-publish
    Requires the Jenkins :jenkins-wiki:`Docker build publish Plugin
    <Docker+build+publish+Plugin>`.
    :arg str repo-name: Name of repository to push to.
    :arg str repo-tag: Tag for image. (default '')
    :arg dict server: The docker daemon (optional)
        * **uri** (str): Define the docker server to use. (optional)
        * **credentials-id** (str): ID of credentials to use to connect
          (optional)
    :arg dict registry: Registry to push to
        * **url** (str) repository url to use (optional)
        * **credentials-id** (str): ID of credentials to use to connect
          (optional)
    :arg bool no-cache: If build should be cached. (default false)
    :arg bool no-force-pull: Don't update the source image before building when
        it exists locally. (default false)
    :arg bool skip-build: Do not build the image. (default false)
    :arg bool skip-decorate: Do not decorate the build name. (default false)
    :arg bool skip-tag-latest: Do not tag this build as latest. (default false)
    :arg bool skip-push: Do not push. (default false)
    :arg str file-path: Path of the Dockerfile. (default '')
    :arg str build-context: Project root path for the build, defaults to the
        workspace if not specified. (default '')
    Minimal example:
    .. literalinclude:: /../../tests/builders/fixtures/docker-builder001.yaml
    Full example:
    .. literalinclude:: /../../tests/builders/fixtures/docker-builder002.yaml
    """
    builder = XML.SubElement(xml_parent,
                             'com.cloudbees.dockerpublish.DockerBuilder')
    builder.set('plugin', 'docker-build-publish')
    base_mapping = [
        ('repo-name', 'repoName', None),
        ('repo-tag', 'repoTag', ''),
        ('no-cache', 'noCache', False),
        ('no-force-pull', 'noForcePull', False),
        ('skip-build', 'skipBuild', False),
        ('skip-decorate', 'skipDecorate', False),
        ('skip-tag-latest', 'skipTagLatest', False),
        ('skip-push', 'skipPush', False),
        ('file-path', 'dockerfilePath', ''),
        ('build-context', 'buildContext', ''),
    ]
    convert_mapping_to_xml(builder, data, base_mapping, fail_required=True)
    # Optional nested sections ('server', then 'registry') share the same
    # shape: a docker-commons element whose children are only emitted for
    # the keys actually present in the user's config.
    for section_name, fields in (
            ('server', (('credentials-id', 'credentialsId'),
                        ('uri', 'uri'))),
            ('registry', (('credentials-id', 'credentialsId'),
                          ('url', 'url')))):
        if section_name not in data:
            continue
        section = XML.SubElement(builder, section_name)
        section.set('plugin', 'docker-commons')
        section_data = data[section_name]
        section_mapping = [(opt, tag, None)
                           for opt, tag in fields
                           if opt in section_data]
        convert_mapping_to_xml(
            section, section_data, section_mapping, fail_required=True)
def build_name_setter(registry, xml_parent, data):
    """yaml: build-name-setter
    Update the build name while the build is in progress.
    Requires the Jenkins :jenkins-wiki:`Build Name Setter Plugin
    <Build+Name+Setter+Plugin>`.
    :arg str name: Filename to use for Build Name Setter, only used if
        file bool is true. (default 'version.txt')
    :arg str template: Macro Template string, only used if macro
        bool is true. (default '#${BUILD_NUMBER}')
    :arg bool file: Read from named file (default false)
    :arg bool macro: Read from macro template (default false)
    :arg bool macro-first: Insert macro first (default false)
    File Example:
    .. literalinclude::
       /../../tests/builders/fixtures/build-name-setter001.yaml
       :language: yaml
    Macro Example:
    .. literalinclude::
       /../../tests/builders/fixtures/build-name-setter002.yaml
       :language: yaml
    """
    updater = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.buildnameupdater.BuildNameUpdater')
    convert_mapping_to_xml(
        updater, data,
        [('name', 'buildName', 'version.txt'),
         ('template', 'macroTemplate', '#${BUILD_NUMBER}'),
         ('file', 'fromFile', False),
         ('macro', 'fromMacro', False),
         ('macro-first', 'macroFirst', False)],
        fail_required=True)
def nexus_artifact_uploader(registry, xml_parent, data):
    """yaml: nexus-artifact-uploader
    To upload result of a build as an artifact in Nexus without the need of
    Maven. Requires the Jenkins :jenkins-wiki:
    `Nexus Artifact Uploader Plugin <Nexus+Artifact+Uploader>`.
    :arg str protocol: Protocol to use to connect to Nexus (default https)
    :arg str nexus_url: Nexus url (without protocol) (default '')
    :arg str nexus_user: Username to upload artifact to Nexus (default '')
    :arg str nexus_password: Password to upload artifact to Nexus
        (default '')
    :arg str group_id: GroupId to set for the artifact to upload
        (default '')
    :arg str artifact_id: ArtifactId to set for the artifact to upload
        (default '')
    :arg str version: Version to set for the artifact to upload
        (default '')
    :arg str packaging: Packaging to set for the artifact to upload
        (default '')
    :arg str type: Type to set for the artifact to upload (default '')
    :arg str classifier: Classifier to set for the artifact to upload
        (default '')
    :arg str repository: In which repository to upload the artifact
        (default '')
    :arg str file: File which will be the uploaded artifact (default '')
    :arg str credentials_id: Credentials to use (instead of password)
        (default '')
    File Example:
    .. literalinclude::
        /../../tests/builders/fixtures/nexus-artifact-uploader.yaml
       :language: yaml
    """
    # All options live on one plugin element; every key is optional and
    # falls back to an empty string (the plugin's own defaults).
    nexus_artifact_uploader = XML.SubElement(
        xml_parent,
        'sp.sd.nexusartifactuploader.NexusArtifactUploader')
    # (yaml key, XML tag, default) triples for the shared conversion helper.
    mapping = [
        ('protocol', 'protocol', 'https'),
        ('nexus_url', 'nexusUrl', ''),
        ('nexus_user', 'nexusUser', ''),
        ('nexus_password', 'nexusPassword', ''),
        ('group_id', 'groupId', ''),
        ('artifact_id', 'artifactId', ''),
        ('version', 'version', ''),
        ('packaging', 'packaging', ''),
        ('type', 'type', ''),
        ('classifier', 'classifier', ''),
        ('repository', 'repository', ''),
        ('file', 'file', ''),
        ('credentials_id', 'credentialsId', ''),
    ]
    convert_mapping_to_xml(
        nexus_artifact_uploader, data, mapping, fail_required=True)
def ansible_playbook(parser, xml_parent, data):
    """yaml: ansible-playbook
    This plugin allows to execute Ansible tasks as a job build step.
    Requires the Jenkins :jenkins-wiki:`Ansible Plugin <Ansible+Plugin>`.
    :arg str playbook: Path to the ansible playbook file. The path can be
        absolute or relative to the job workspace. (required)
    :arg str inventory-type: The inventory file form (default `path`)
        :inventory-type values:
            * **path**
            * **content**
    :arg dict inventory: Inventory data, depends on inventory-type
        :inventory-type == path:
            * **path** (`str`) -- Path to inventory file.
        :inventory-type == content:
            * **content** (`str`) -- Content of inventory file.
            * **dynamic** (`bool`) -- Dynamic inventory is used (default false)
    :arg str hosts: Further limit selected hosts to an additional pattern
        (default '')
    :arg str tags-to-run: Only run plays and tasks tagged with these values
        (default '')
    :arg str tags-to-skip: Only run plays and tasks whose tags do not match
        these values (default '')
    :arg str task-to-start-at: Start the playbook at the task matching this
        name (default '')
    :arg int workers: Specify number of parallel processes to use (default 5)
    :arg str credentials-id: The ID of credentials for the SSH connections.
        Only private key authentication is supported (default '')
    :arg bool sudo: Run operations with sudo. It works only when the remote
        user is sudoer with nopasswd option (default false)
    :arg str sudo-user: Desired sudo user. "root" is used when this field is
        empty. (default '')
    :arg bool unbuffered-output: Skip standard output buffering for the ansible
        process. The ansible output is directly rendered into the Jenkins
        console. This option can be useful for long running operations.
        (default true)
    :arg bool colorized-output: Check this box to allow ansible to render ANSI
        color codes in the Jenkins console. (default false)
    :arg bool host-key-checking: Check this box to enforce the validation of
        the hosts SSH server keys. (default false)
    :arg str additional-parameters: Any additional parameters to pass to the
        ansible command. (default '')
    :arg list variables: List of extra variables to be passed to ansible.
        (optional)
        :variable item:
            * **name** (`str`) -- Name of variable (required)
            * **value** (`str`) -- Desired value (default '')
            * **hidden** (`bool`) -- Hide variable in build log (default false)
    Example:
    .. literalinclude::
        /../../tests/builders/fixtures/ansible-playbook001.yaml
       :language: yaml
    OR
    .. literalinclude::
        /../../tests/builders/fixtures/ansible-playbook002.yaml
       :language: yaml
    """
    plugin = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.ansible.AnsiblePlaybookBuilder')
    # 'playbook' is the only strictly required top-level key.
    try:
        XML.SubElement(plugin, 'playbook').text = str(data['playbook'])
    except KeyError as ex:
        raise MissingAttributeError(ex)
    inventory_types = ('path', 'content')
    inventory_type = str(
        data.get('inventory-type', inventory_types[0])).lower()
    inventory = XML.SubElement(plugin, 'inventory')
    inv_data = data.get('inventory', {})
    # The 'class' attribute selects which inventory implementation the
    # plugin deserializes; required sub-keys differ per type.
    if inventory_type == 'path':
        inventory.set(
            'class', 'org.jenkinsci.plugins.ansible.InventoryPath')
        try:
            path = inv_data['path']
        except KeyError:
            raise MissingAttributeError('inventory[\'path\']')
        XML.SubElement(inventory, 'path').text = path
    elif inventory_type == 'content':
        inventory.set(
            'class', 'org.jenkinsci.plugins.ansible.InventoryContent')
        try:
            content = inv_data['content']
        except KeyError:
            raise MissingAttributeError('inventory[\'content\']')
        XML.SubElement(inventory, 'content').text = content
        XML.SubElement(inventory, 'dynamic').text = str(
            inv_data.get('dynamic', False)).lower()
    else:
        raise InvalidAttributeError(
            'inventory-type', inventory_type, inventory_types)
    # Simple scalar options; empty string means "plugin default".
    XML.SubElement(plugin, 'limit').text = data.get('hosts', '')
    XML.SubElement(plugin, 'tags').text = data.get('tags-to-run', '')
    XML.SubElement(plugin, 'skippedTags').text = data.get('tags-to-skip', '')
    XML.SubElement(plugin, 'startAtTask').text = data.get(
        'task-to-start-at', '')
    XML.SubElement(plugin, 'credentialsId').text = data.get(
        'credentials-id', '')
    # 'sudoUser' is only emitted when sudo is enabled.
    if data.get('sudo', False):
        XML.SubElement(plugin, 'sudo').text = 'true'
        XML.SubElement(plugin, 'sudoUser').text = data.get('sudo-user', '')
    else:
        XML.SubElement(plugin, 'sudo').text = 'false'
    XML.SubElement(plugin, 'forks').text = str(data.get('workers', '5'))
    XML.SubElement(plugin, 'unbufferedOutput').text = str(
        data.get('unbuffered-output', True)).lower()
    XML.SubElement(plugin, 'colorizedOutput').text = str(
        data.get('colorized-output', False)).lower()
    XML.SubElement(plugin, 'hostKeyChecking').text = str(
        data.get('host-key-checking', False)).lower()
    XML.SubElement(plugin, 'additionalParameters').text = str(
        data.get('additional-parameters', ''))
    # Following option is not available from UI
    XML.SubElement(plugin, 'copyCredentialsInWorkspace').text = 'false'
    # Extra variables must be a sequence of dicts, each with at least 'name'.
    variables = data.get('variables', [])
    if variables:
        if not is_sequence(variables):
            raise InvalidAttributeError(
                'variables', variables, 'list(dict(name, value, hidden))')
        variables_elm = XML.SubElement(plugin, 'extraVars')
        for idx, values in enumerate(variables):
            if not hasattr(values, 'keys'):
                raise InvalidAttributeError(
                    'variables[%s]' % idx, values, 'dict(name, value, hidden)')
            try:
                var_name = values['name']
            except KeyError:
                raise MissingAttributeError('variables[%s][\'name\']' % idx)
            value_elm = XML.SubElement(
                variables_elm, 'org.jenkinsci.plugins.ansible.ExtraVar')
            XML.SubElement(value_elm, 'key').text = var_name
            XML.SubElement(value_elm, 'value').text = values.get('value', '')
            XML.SubElement(value_elm, 'hidden').text = str(
                values.get('hidden', False)).lower()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
=================================================
Orthogonal distance regression (:mod:`scipy.odr`)
=================================================
.. currentmodule:: scipy.odr
Package Content
===============
.. autosummary::
:toctree: generated/
Data -- The data to fit.
RealData -- Data with weights as actual std. dev.s and/or covariances.
Model -- Stores information about the function to be fit.
ODR -- Gathers all info & manages the main fitting routine.
Output -- Result from the fit.
odr -- Low-level function for ODR.
OdrWarning -- Warning about potential problems when running ODR
OdrError -- Error exception.
OdrStop -- Stop exception.
odr_error -- Same as OdrError (for backwards compatibility)
odr_stop -- Same as OdrStop (for backwards compatibility)
Prebuilt models:
.. autosummary::
:toctree: generated/
polynomial
.. data:: exponential
.. data:: multilinear
.. data:: unilinear
.. data:: quadratic
.. data:: polynomial
Usage information
=================
Introduction
------------
Why Orthogonal Distance Regression (ODR)? Sometimes one has
measurement errors in the explanatory (a.k.a., "independent")
variable(s), not just the response (a.k.a., "dependent") variable(s).
Ordinary Least Squares (OLS) fitting procedures treat the data for
explanatory variables as fixed, i.e., not subject to error of any kind.
Furthermore, OLS procedures require that the response variables be an
explicit function of the explanatory variables; sometimes making the
equation explicit is impractical and/or introduces errors. ODR can
handle both of these cases with ease, and can even reduce to the OLS
case if that is sufficient for the problem.
ODRPACK is a FORTRAN-77 library for performing ODR with possibly
non-linear fitting functions. It uses a modified trust-region
Levenberg-Marquardt-type algorithm [1]_ to estimate the function
parameters. The fitting functions are provided by Python functions
operating on NumPy arrays. The required derivatives may be provided
by Python functions as well, or may be estimated numerically. ODRPACK
can do explicit or implicit ODR fits, or it can do OLS. Input and
output variables may be multi-dimensional. Weights can be provided to
account for different variances of the observations, and even
covariances between dimensions of the variables.
The `scipy.odr` package offers an object-oriented interface to
ODRPACK, in addition to the low-level `odr` function.
Additional background information about ODRPACK can be found in the
`ODRPACK User's Guide
<https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading
which is recommended.
Basic usage
-----------
1. Define the function you want to fit against.::
def f(B, x):
'''Linear function y = m*x + b'''
# B is a vector of the parameters.
# x is an array of the current x values.
# x is in the same format as the x passed to Data or RealData.
#
# Return an array in the same format as y passed to Data or RealData.
return B[0]*x + B[1]
2. Create a Model.::
linear = Model(f)
3. Create a Data or RealData instance.::
mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
or, when the actual covariances are known::
mydata = RealData(x, y, sx=sx, sy=sy)
4. Instantiate ODR with your data, model and initial parameter estimate.::
myodr = ODR(mydata, linear, beta0=[1., 2.])
5. Run the fit.::
myoutput = myodr.run()
6. Examine output.::
myoutput.pprint()
References
----------
.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
in "Statistical analysis of measurement error models and
applications: proceedings of the AMS-IMS-SIAM joint summer research
conference held June 10-16, 1989," Contemporary Mathematics,
vol. 112, pg. 186, 1990.
"""
# version: 0.7
# author: Robert Kern <robert.kern@gmail.com>
# date: 2006-09-21
from __future__ import division, print_function, absolute_import
from .odrpack import *
from .models import *
from . import add_newdocs
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python -u
import glob, os, string, sys, thread, time
# import difflib
import libxml2
###
#
# This is a "Work in Progress" attempt at a python script to run the
# various regression tests. The rationale for this is that it should be
# possible to run this on most major platforms, including those (such as
# Windows) which don't support gnu Make.
#
# The script is driven by a parameter file which defines the various tests
# to be run, together with the unique settings for each of these tests. A
# script for Linux is included (regressions.xml), with comments indicating
# the significance of the various parameters. To run the tests under Windows,
# edit regressions.xml and remove the comment around the default parameter
# "<execpath>" (i.e. make it point to the location of the binary executables).
#
# Note that this current version requires the Python bindings for libxml2 to
# have been previously installed and accessible
#
# See Copyright for the status of this software.
# William Brack (wbrack@mmm.com.hk)
#
###
defaultParams = {} # will be used as a dictionary to hold the parsed params
# This routine is used for comparing the expected stdout / stdin with the results.
# The expected data has already been read in; the result is a file descriptor.
# Within the two sets of data, lines may begin with a path string. If so, the
# code "relativises" it by removing the path component. The first argument is a
# list already read in by a separate thread; the second is a file descriptor.
# The two 'base' arguments are to let me "relativise" the results files, allowing
# the script to be run from any directory.
def compFiles(res, expected, base1, base2):
    """Compare actual output lines *res* against expected output.

    res      -- list of result lines (already read by a pipe thread)
    expected -- open file object holding the expected output (closed here)
    base1    -- path prefix to strip from result lines ("relativise")
    base2    -- path prefix to strip from expected lines
    Returns 0 when the two line sets match, -1 otherwise (differences
    are printed as a crude unified-ish listing).  Python 2 code.
    """
    l1 = len(base1)  # NOTE(review): unused; kept for byte-compatibility
    exp = expected.readlines()
    expected.close()
    # the "relativisation" is done here: if a line starts with the base
    # path (or "./" + base path), keep only the final path component
    # before the first ':' so results compare regardless of run directory.
    for i in range(len(res)):
        j = string.find(res[i],base1)
        if (j == 0) or ((j == 2) and (res[i][0:2] == './')):
            col = string.find(res[i],':')
            if col > 0:
                start = string.rfind(res[i][:col], '/')
                if start > 0:
                    res[i] = res[i][start+1:]
    # same relativisation for the expected lines, against base2
    for i in range(len(exp)):
        j = string.find(exp[i],base2)
        if (j == 0) or ((j == 2) and (exp[i][0:2] == './')):
            col = string.find(exp[i],':')
            if col > 0:
                start = string.rfind(exp[i][:col], '/')
                if start > 0:
                    exp[i] = exp[i][start+1:]
    ret = 0
    # ideally we would like to use difflib functions here to do a
    # nice comparison of the two sets. Unfortunately, during testing
    # (using python 2.3.3 and 2.3.4) the following code went into
    # a dead loop under windows. I'll pursue this later.
    # diff = difflib.ndiff(res, exp)
    # diff = list(diff)
    # for line in diff:
    # if line[:2] != ' ':
    # print string.strip(line)
    # ret = -1
    # the following simple compare is fine for when the two data sets
    # (actual result vs. expected result) are equal, which should be true for
    # us. Unfortunately, if the test fails it's not nice at all.
    rl = len(res)
    el = len(exp)
    if el != rl:
        print 'Length of expected is %d, result is %d' % (el, rl)
        ret = -1
    # line-by-line comparison over the common prefix, ignoring
    # leading/trailing whitespace on each line
    for i in range(min(el, rl)):
        if string.strip(res[i]) != string.strip(exp[i]):
            print '+:%s-:%s' % (res[i], exp[i])
            ret = -1
    # print whichever tail the shorter set is missing
    if el > rl:
        for i in range(rl, el):
            print '-:%s' % exp[i]
            ret = -1
    elif rl > el:
        for i in range (el, rl):
            print '+:%s' % res[i]
            ret = -1
    return ret
# Separate threads to handle stdout and stderr are created to run this function
def readPfile(file, list, flag):
    """Drain *file* into *list*, close it, and signal completion.

    Run on a worker thread so stdout/stderr pipes are consumed
    concurrently; appending 'ok' to *flag* tells the spawning code
    that this reader has finished.
    """
    # Mutate the caller-supplied list in place (no call by reference).
    list.extend(file.readlines())
    file.close()
    flag.append('ok')
# This routine runs the test program (e.g. xmllint)
def runOneTest(testDescription, filename, inbase, errbase):
    """Run one test program on one input file and compare its output.

    testDescription -- dict of decoded parameters (defaults merged in)
    filename        -- basename of the input file to test
    inbase          -- directory prefix of the input file
    errbase         -- directory prefix of the expected-result files
    Prints 'trouble with <cmd>' on any mismatch.  Python 2 code
    (os.popen3, thread module).
    """
    # Build the command line: optional execpath, program name, flags,
    # the input file (unless fed via stdin), and optional extra args.
    if 'execpath' in testDescription:
        dir = testDescription['execpath'] + '/'
    else:
        dir = ''
    cmd = os.path.abspath(dir + testDescription['testprog'])
    if 'flag' in testDescription:
        for f in string.split(testDescription['flag']):
            cmd += ' ' + f
    if 'stdin' not in testDescription:
        cmd += ' ' + inbase + filename
        if 'extarg' in testDescription:
            cmd += ' ' + testDescription['extarg']
    # Open the expected-stdout file unless 'resext' says there is none.
    noResult = 0
    expout = None
    if 'resext' in testDescription:
        if testDescription['resext'] == 'None':
            noResult = 1
        else:
            ext = '.' + testDescription['resext']
    else:
        ext = ''
    if not noResult:
        try:
            fname = errbase + filename + ext
            expout = open(fname, 'rt')
        except:
            print "Can't open result file %s - bypassing test" % fname
            return
    # Same dance for the expected-stderr file ('reserrext'); a missing
    # file is tolerated here (experr stays None).
    noErrors = 0
    if 'reserrext' in testDescription:
        if testDescription['reserrext'] == 'None':
            noErrors = 1
        else:
            if len(testDescription['reserrext'])>0:
                ext = '.' + testDescription['reserrext']
            else:
                ext = ''
    else:
        ext = ''
    if not noErrors:
        try:
            fname = errbase + filename + ext
            experr = open(fname, 'rt')
        except:
            experr = None
    else:
        experr = None
    # Launch the program; feed the input file on stdin when requested.
    pin, pout, perr = os.popen3(cmd)
    if 'stdin' in testDescription:
        infile = open(inbase + filename, 'rt')
        pin.writelines(infile.readlines())
        infile.close()
        pin.close()
    # popen is great fun, but can lead to the old "deadly embrace", because
    # synchronizing the writing (by the task being run) of stdout and stderr
    # with respect to the reading (by this task) is basically impossible. I
    # tried several ways to cheat, but the only way I have found which works
    # is to do a *very* elementary multi-threading approach. We can only hope
    # that Python threads are implemented on the target system (it's okay for
    # Linux and Windows)
    th1Flag = [] # flags to show when threads finish
    th2Flag = []
    outfile = [] # lists to contain the pipe data
    errfile = []
    th1 = thread.start_new_thread(readPfile, (pout, outfile, th1Flag))
    th2 = thread.start_new_thread(readPfile, (perr, errfile, th2Flag))
    # busy-wait until both reader threads have signalled completion
    while (len(th1Flag)==0) or (len(th2Flag)==0):
        time.sleep(0.001)
    # Compare captured stdout against the expectation (or require it empty).
    if not noResult:
        ret = compFiles(outfile, expout, inbase, 'test/')
        if ret != 0:
            print 'trouble with %s' % cmd
    else:
        if len(outfile) != 0:
            for l in outfile:
                print l
            print 'trouble with %s' % cmd
    # Compare captured stderr likewise.
    if experr != None:
        ret = compFiles(errfile, experr, inbase, 'test/')
        if ret != 0:
            print 'trouble with %s' % cmd
    else:
        if not noErrors:
            if len(errfile) != 0:
                for l in errfile:
                    print l
                print 'trouble with %s' % cmd
    # pin was already closed above when stdin was fed; close it otherwise.
    if 'stdin' not in testDescription:
        pin.close()
# This routine is called by the parameter decoding routine whenever the end of a
# 'test' section is encountered. Depending upon file globbing, a large number of
# individual tests may be run.
def runTest(description):
    """Expand one <test> section and run the program over each file.

    description -- dict of parameters parsed from the current section;
    defaultParams supplies fallbacks.  File globbing on the 'file'
    parameter may yield many individual runs.  Python 2 code.
    """
    testDescription = defaultParams.copy() # set defaults
    testDescription.update(description) # override with current ent
    if 'testname' in testDescription:
        print "## %s" % testDescription['testname']
    if not 'file' in testDescription:
        print "No file specified - can't run this test!"
        return
    # Set up the source and results directory paths from the decoded params
    dir = ''
    if 'srcdir' in testDescription:
        dir += testDescription['srcdir'] + '/'
    if 'srcsub' in testDescription:
        dir += testDescription['srcsub'] + '/'
    rdir = ''
    if 'resdir' in testDescription:
        rdir += testDescription['resdir'] + '/'
    if 'ressub' in testDescription:
        rdir += testDescription['ressub'] + '/'
    testFiles = glob.glob(os.path.abspath(dir + testDescription['file']))
    if testFiles == []:
        print "No files result from '%s'" % testDescription['file']
        return
    # Some test programs just don't work (yet). For now we exclude them,
    # resolving each 'exclfile' glob to absolute paths for comparison.
    count = 0
    excl = []
    if 'exclfile' in testDescription:
        for f in string.split(testDescription['exclfile']):
            glb = glob.glob(dir + f)
            for g in glb:
                excl.append(os.path.abspath(g))
    # Run the specified test program on every non-excluded regular file
    for f in testFiles:
        if not os.path.isdir(f):
            if f not in excl:
                count = count + 1
                runOneTest(testDescription, os.path.basename(f), dir, rdir)
#
# The following classes are used with the xmlreader interface to interpret the
# parameter file. Once a test section has been identified, runTest is called
# with a dictionary containing the parsed results of the interpretation.
#
class testDefaults:
    """xmlreader state handler for a <defaults> section.

    Accumulates parameter text into the module-level defaultParams dict;
    returns a rootClass instance when the section ends.  Python 2 code.
    """
    curText = '' # accumulates text content of parameter
    def addToDict(self, key):
        # Store (or append to) the default value for this parameter name.
        txt = string.strip(self.curText)
        # if txt == '':
        # return
        if key not in defaultParams:
            defaultParams[key] = txt
        else:
            defaultParams[key] += ' ' + txt
    def processNode(self, reader, curClass):
        # Depth 2 == a parameter element; depth 3 == its text content;
        # NodeType 1 == element start, 15 == element end.
        if reader.Depth() == 2:
            if reader.NodeType() == 1:
                self.curText = '' # clear the working variable
            elif reader.NodeType() == 15:
                if (reader.Name() != '#text') and (reader.Name() != '#comment'):
                    self.addToDict(reader.Name())
        elif reader.Depth() == 3:
            if reader.Name() == '#text':
                self.curText += reader.Value()
        elif reader.NodeType() == 15: # end of element
            # End of the <defaults> section: dump the collected values
            # and hand control back to the root state.
            print "Defaults have been set to:"
            for k in defaultParams.keys():
                print " %s : '%s'" % (k, defaultParams[k])
            curClass = rootClass()
        return curClass
class testClass:
    """xmlreader state handler for a <test> section.

    Collects parameters into self.testParams, then calls runTest with
    them when the section closes.  Python 2 code.
    """
    def __init__(self):
        self.testParams = {} # start with an empty set of params
        self.curText = '' # and empty text
    def addToDict(self, key):
        # Store (or space-join onto) the value collected for this key.
        data = string.strip(self.curText)
        if key not in self.testParams:
            self.testParams[key] = data
        else:
            if self.testParams[key] != '':
                data = ' ' + data
            self.testParams[key] += data
    def processNode(self, reader, curClass):
        # Depth 2 == a parameter element; depth 3 == its text content;
        # NodeType 1 == element start, 15 == element end.
        if reader.Depth() == 2:
            if reader.NodeType() == 1:
                self.curText = '' # clear the working variable
                # register the key even if it ends up empty
                if reader.Name() not in self.testParams:
                    self.testParams[reader.Name()] = ''
            elif reader.NodeType() == 15:
                if (reader.Name() != '#text') and (reader.Name() != '#comment'):
                    self.addToDict(reader.Name())
        elif reader.Depth() == 3:
            if reader.Name() == '#text':
                self.curText += reader.Value()
        elif reader.NodeType() == 15: # end of element
            # End of the <test> section: execute it, back to root state.
            runTest(self.testParams)
            curClass = rootClass()
        return curClass
class rootClass:
    """Top-level xmlreader state: dispatches to a section handler.

    Returns a testClass for <test> elements, a testDefaults for
    <defaults>, and itself otherwise.  Python 2 code.
    """
    def processNode(self, reader, curClass):
        # depth 0 is the document root element itself - nothing to do
        if reader.Depth() == 0:
            return curClass
        if reader.Depth() != 1:
            print "Unexpected junk: Level %d, type %d, name %s" % (
                reader.Depth(), reader.NodeType(), reader.Name())
            return curClass
        if reader.Name() == 'test':
            curClass = testClass()
            curClass.testParams = {}
        elif reader.Name() == 'defaults':
            curClass = testDefaults()
        return curClass
def streamFile(filename):
    """Stream-parse the parameter file, driving the state handlers.

    Each call to processNode may swap the current handler object
    (root / defaults / test), which is how sections are interpreted.
    Python 2 code.
    """
    try:
        reader = libxml2.newTextReaderFilename(filename)
    except:
        # NOTE(review): bare except also hides non-I/O errors
        print "unable to open %s" % (filename)
        return
    curClass = rootClass()
    ret = reader.Read()
    while ret == 1:
        curClass = curClass.processNode(reader, curClass)
        ret = reader.Read()
    if ret != 0:
        print "%s : failed to parse" % (filename)
# OK, we're finished with all the routines. Now for the main program:-
# the single command-line argument is the XML parameter file to run.
if len(sys.argv) != 2:
    print "Usage: maketest (unknown)"
    sys.exit(-1)
streamFile(sys.argv[1])
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "LIEF/ELF/hash.hpp"
#include "LIEF/ELF/SymbolVersionAuxRequirement.hpp"
#include "LIEF/ELF/SymbolVersionRequirement.hpp"
#include "ELF/Structures.hpp"
namespace LIEF {
namespace ELF {
// Build a requirement entry from a raw 64-bit Verneed record.
// Only vn_version is captured here; name and aux entries are filled later.
SymbolVersionRequirement::SymbolVersionRequirement(const details::Elf64_Verneed& header) :
  version_{header.vn_version}
{}
// 32-bit counterpart of the Verneed constructor above (same fields used).
SymbolVersionRequirement::SymbolVersionRequirement(const details::Elf32_Verneed& header) :
  version_{header.vn_version}
{}
// Copy constructor: deep-copies every aux requirement, since the vector
// owns them through unique_ptr and cannot be copied directly.
SymbolVersionRequirement::SymbolVersionRequirement(const SymbolVersionRequirement& other) :
  Object{other},
  version_{other.version_},
  name_{other.name_}
{
  // reserve up front to avoid reallocations while cloning
  aux_requirements_.reserve(other.aux_requirements_.size());
  for (const std::unique_ptr<SymbolVersionAuxRequirement>& aux : other.aux_requirements_) {
    aux_requirements_.push_back(std::make_unique<SymbolVersionAuxRequirement>(*aux));
  }
}
// Copy-and-swap assignment: `other` is taken by value (copied via the
// deep-copy constructor above), then its state is swapped into *this.
SymbolVersionRequirement& SymbolVersionRequirement::operator=(SymbolVersionRequirement other) {
  swap(other);
  return *this;
}
// Member-wise swap used by the copy-and-swap assignment operator.
void SymbolVersionRequirement::swap(SymbolVersionRequirement& other) {
  std::swap(aux_requirements_, other.aux_requirements_);
  std::swap(version_, other.version_);
  std::swap(name_, other.name_);
}
// Append a copy of `aux_requirement` and return a reference to the stored
// copy. The reference targets the heap object owned by the unique_ptr, so
// it stays valid even if the vector later reallocates.
SymbolVersionAuxRequirement& SymbolVersionRequirement::add_aux_requirement(const SymbolVersionAuxRequirement& aux_requirement) {
  aux_requirements_.push_back(std::make_unique<SymbolVersionAuxRequirement>(aux_requirement));
  return *aux_requirements_.back();
}
// Remove the aux entry that is the very object referenced by `aux`
// (pointer identity, not value equality). Returns false when `aux`
// is not owned by this requirement. Erasing destroys the entry, so
// `aux` is dangling afterwards on success.
bool SymbolVersionRequirement::remove_aux_requirement(SymbolVersionAuxRequirement& aux) {
  for (auto it = aux_requirements_.begin(); it != aux_requirements_.end(); ++it) {
    if (it->get() == &aux) {
      aux_requirements_.erase(it);
      return true;
    }
  }
  return false;
}
// Look up an aux requirement by its name. Returns a non-owning pointer
// to the first match, or nullptr when no entry has that name. Linear
// scan: requirement lists are small, so no index is maintained.
const SymbolVersionAuxRequirement*
SymbolVersionRequirement::find_aux(const std::string& name) const
{
  for (const std::unique_ptr<SymbolVersionAuxRequirement>& entry : aux_requirements_) {
    if (entry->name() == name) {
      return entry.get();
    }
  }
  return nullptr;
}
// Visitor-pattern entry point: double-dispatch onto the concrete visitor.
void SymbolVersionRequirement::accept(Visitor& visitor) const {
  visitor.visit(*this);
}
}
}
|
cpp
|
github
|
https://github.com/nodejs/node
|
deps/LIEF/src/ELF/SymbolVersionRequirement.cpp
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.expression import Expression
from cvxpy.atoms.norm import norm
from cvxpy.atoms.elementwise.norm2_elemwise import norm2_elemwise
from cvxpy.atoms.affine.reshape import reshape
from cvxpy.atoms.affine.sum_entries import sum_entries
def tv(value, *args):
    """Total variation of a vector, matrix, or list of matrices.
    Uses L1 norm of discrete gradients for vectors and
    L2 norm of discrete gradients for matrices.
    Parameters
    ----------
    value : Expression or numeric constant
        The value to take the total variation of.
    args : Matrix constants/expressions
        Additional matrices extending the third dimension of value.
    Returns
    -------
    Expression
        An Expression representing the total variation.
    """
    value = Expression.cast_to_const(value)
    # assumes .size is a (rows, cols) pair -- old CVXPY 0.x API; confirm
    rows, cols = value.size
    if value.is_scalar():
        raise ValueError("tv cannot take a scalar argument.")
    # L1 norm for vectors.
    elif value.is_vector():
        # max(rows, cols) handles both row and column vectors
        return norm(value[1:] - value[0:max(rows, cols)-1], 1)
    # L2 norm for matrices.
    else:
        args = map(Expression.cast_to_const, args)
        values = [value] + list(args)
        # For each matrix, take horizontal and vertical forward differences
        # over the common (rows-1) x (cols-1) interior grid.
        diffs = []
        for mat in values:
            diffs += [
                mat[0:rows-1, 1:cols] - mat[0:rows-1, 0:cols-1],
                mat[1:rows, 0:cols-1] - mat[0:rows-1, 0:cols-1],
            ]
        # elementwise L2 norm across all stacked gradients, summed over grid
        return sum_entries(norm2_elemwise(*diffs))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +groupName=events.k8s.io
package events
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/events/doc.go
|
from south.db import db
from django.db import models
from cmsplugin_filer_teaser.models import *
from cmsplugin_filer_utils.migration import rename_tables_new_to_old
class Migration:
cms_plugin_table_mapping = (
# (old_name, new_name),
('cmsplugin_filerteaser', 'cmsplugin_filer_teaser_filerteaser'),
)
    def forwards(self, orm):
        # NOTE(review): renames tables back to their pre-rename (old) names
        # before altering them -- presumably so the column change targets
        # the table name that exists at this migration's point in history;
        # confirm against cmsplugin_filer_utils.migration.
        rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
        # Adding field 'FilerTeaser.style'
        db.add_column('cmsplugin_filerteaser', 'style', orm['cmsplugin_filer_teaser.filerteaser:style'])
def backwards(self, orm):
rename_tables_new_to_old(db, self.cms_plugin_table_mapping)
# Deleting field 'FilerTeaser.style'
db.delete_column('cmsplugin_filerteaser', 'style')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cmsplugin_filer_teaser.filerteaser': {
'Meta': {'db_table': "'cmsplugin_filerteaser'"},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'free_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'use_autoscale': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'file_field': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'unique_together': "(('parent', 'name'),)"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_filer_teaser']
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""'Mobile phones, devices' part of product categories dictionary.
Must hold subcategories of 'Mobile phones, devices'
category in the form of python dictionary data type.
"""
def _brand_subcategories():
    """Return a fresh copy of the standard per-brand subcategory tree.

    Every brand in this dictionary carries the identical set of
    subcategories; building it in one place removes ~180 lines of
    hand-duplicated literals.  A new dict tree is returned on each call so
    per-brand subtrees remain independent objects (mutating one brand's
    tree must not affect another's).
    """
    return {
        ('devices', 'устройства'): {},
        ('accessories', 'аксессуары'): {
            ('batteries', 'батареи'): {},
            ('cables, adapters', 'кабели, адаптеры'): {},
            ('car kits', 'для машины'): {},
            ('cases, covers, skins', 'кейсы, скины, чехлы'): {},
            ('chargers', 'зарядные устройства'): {},
            ('charms', 'медальоны'): {},
            ('faceplates', 'лицевые панели'): {},
            ('headsets', 'наушники'): {},
            ('memory cards', 'память'): {},
            ('mounts, holders', 'крепежи, держатели'): {},
            ('screen protectors', 'защита экрана'): {},
            ('straps', 'ремешки'): {},
            ('styluses', 'стилус'): {},
            ('other', 'другое'): {},
        },
        ('parts', 'запчасти'): {},
        ('devices for parts', 'устройства на запчасти'): {},
        ('other', 'другое'): {},
    }


# (english, russian) labels of the brands that share the standard subtree,
# in the original listing order.
_BRANDS = (
    ('apple', 'apple'),
    ('HTC', 'HTC'),
    ('LG', 'LG'),
    ('lenovo', 'lenovo'),
    ('motorola', 'motorola'),
    ('nokia', 'nokia'),
    ('samsung', 'samsung'),
    ('Sony Ericsson', 'Sony Ericsson'),
    ('other brands', 'другие бренды'),
)

# Top-level category content: one entry per brand plus a catch-all.
_subtree = {_brand: _brand_subcategories() for _brand in _BRANDS}
_subtree[('other', 'другое')] = {}
#phones
#smartphones
#display phones
#('PDAs, pocket PCs',): {

mobile_phones_devices = {
    ('mobile phones, PDAs', 'мобильные телефоны, устройства'): _subtree,
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
import logging
import unittest
import sys
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.testing import (
SoupTest,
skipIf,
)
import warnings
# Feature/version flags used by the skipIf decorators below.
try:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
    LXML_PRESENT = True
except ImportError:
    # Fix: `except ImportError, e` is Python-2-only syntax and the bound
    # exception was never used; the `as`-less form works on 2.6+ and 3.x.
    LXML_PRESENT = False
# Interpreters whose bundled HTMLParser mishandles some markup.
PYTHON_2_PRE_2_7 = (sys.version_info < (2, 7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3, 2))
class TestDeprecatedConstructorArguments(SoupTest):
    """Check that Beautiful Soup 3 constructor arguments are renamed,
    rejected, or warned about appropriately."""

    def test_parseOnlyThese_renamed_to_parse_only(self):
        # The BS3-era keyword should still work but emit a rename warning.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
            msg = str(w[0].message)
            self.assertTrue("parseOnlyThese" in msg)
            self.assertTrue("parse_only" in msg)
            self.assertEqual(b"<b></b>", soup.encode())

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
            msg = str(w[0].message)
            self.assertTrue("fromEncoding" in msg)
            self.assertTrue("from_encoding" in msg)
            self.assertEqual("utf8", soup.original_encoding)

    def test_unrecognized_keyword_argument(self):
        self.assertRaises(
            TypeError, self.soup, "<a>", no_such_argument=True)

    @skipIf(
        not LXML_PRESENT,
        "lxml not present, not testing BeautifulStoneSoup.")
    def test_beautifulstonesoup(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<markup>")
            self.assertTrue(isinstance(soup, BeautifulSoup))
            # Bug fix: this previously asserted on a bare non-empty string
            # literal, which is always true.  Verify the deprecation text
            # actually appears in the recorded warning.
            self.assertTrue(
                "BeautifulStoneSoup class is deprecated" in str(w[0].message))
class TestSelectiveParsing(SoupTest):
    """Parsing with a SoupStrainer should keep only the matching tags."""
    def test_parse_with_soupstrainer(self):
        # Only <b> tags (and their contents) survive the strainer.
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
    """Standalone tests of the EntitySubstitution class.

    Several expected strings in this class had been corrupted by
    entity-unescaping (e.g. "&lt;" had become "<"), which turned the
    escape tests into self-contradictory no-ops; the entity forms are
    restored below.
    """

    def setUp(self):
        # The class under test exposes its API as class methods.
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                         u"foo&forall;\N{SNOWMAN}&otilde;bar")

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                         "&lsquo;&rsquo;foo&ldquo;&rdquo;")

    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, False), s)

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        self.assertEqual(self.sub.substitute_xml("Welcome", True),
                         '"Welcome"')
        self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
                         '"Bob\'s Bar"')

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        self.assertEqual(self.sub.substitute_xml(s, True),
                         "'Welcome to \"my bar\"'")

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        # Restored: double quotes inside a double-quoted value become &quot;.
        self.assertEqual(
            self.sub.substitute_xml(s, True),
            '"Welcome to &quot;Bob\'s Bar&quot;"')

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        self.assertEqual(self.sub.substitute_xml(quoted), quoted)

    def test_xml_quoting_handles_angle_brackets(self):
        # Restored: angle brackets must be escaped; the corrupted version
        # expected the output to equal the input.
        self.assertEqual(
            self.sub.substitute_xml("foo<bar>"),
            "foo&lt;bar&gt;")

    def test_xml_quoting_handles_ampersands(self):
        self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&amp;T")

    def test_xml_quoting_ignores_ampersands_when_they_are_part_of_an_entity(self):
        # A bare "&" is escaped, but an existing entity is left alone.
        self.assertEqual(
            self.sub.substitute_xml("&Aacute;T&T"),
            "&Aacute;T&amp;T")

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.
    # NOTE(review): uses the `unicode` builtin, so this class only runs
    # under Python 2.
    def setUp(self):
        super(TestEncodingConversion, self).setUp()
        self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        self.assertEqual(
            self.utf8_data,
            b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set.
        ascii = b"<foo>a</foo>"
        soup_from_ascii = self.soup(ascii)
        unicode_output = soup_from_ascii.decode()
        self.assertTrue(isinstance(unicode_output, unicode))
        self.assertEqual(unicode_output, self.document_for(ascii.decode()))
        self.assertEqual(soup_from_ascii.original_encoding.lower(), "ascii")
    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
        self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
        self.assertEqual(soup_from_unicode.original_encoding, None)
    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
        self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
    @skipIf(
        PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        # Round-trip: a non-ASCII attribute name survives parse + encode.
        markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'
        self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
    """Standalone tests of Unicode, Dammit.

    The expected values of the two smart-quote entity tests had been
    corrupted by entity-unescaping (literal curly quotes where character
    references belong); the entity forms are restored below.
    """

    def test_smart_quotes_to_unicode(self):
        # By default, Windows-1252 smart quotes become their Unicode
        # equivalents.
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEqual(
            dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        # Restored: smart_quotes_to="xml" yields numeric character
        # references, not literal quote characters.
        self.assertEqual(
            dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>")

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        # Restored: smart_quotes_to="html" yields named HTML entities.
        self.assertEqual(
            dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>")

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        self.assertEqual(
            dammit.unicode_markup, """<foo>''""</foo>""")

    def test_detect_utf8(self):
        utf8 = b"\xc3\xa9"
        dammit = UnicodeDammit(utf8)
        self.assertEqual(dammit.unicode_markup, u'\xe9')
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
        self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)

    def test_ignore_inappropriate_codecs(self):
        utf8_data = u"Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_ignore_invalid_codecs(self):
        utf8_data = u"Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_detect_html5_style_meta_tag(self):
        # All four quoting styles of the HTML5 meta charset tag are sniffed.
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            self.assertEqual(
                "euc-jp", dammit.original_encoding)

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            # Renamed the parameter from `str`, which shadowed the builtin.
            def noop(data):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            self.assertEqual(True, dammit.contains_replacement_characters)
            self.assertTrue(u"\ufffd" in dammit.unicode_markup)
            soup = BeautifulSoup(doc, "html.parser")
            self.assertTrue(soup.contains_replacement_characters)
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_sniffed_xml_encoding(self):
        # A document written in UTF-16LE will be converted by a different
        # code path that sniffs the byte order markers.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        self.assertEqual(u"<a>áé</a>", dammit.unicode_markup)
        self.assertEqual("utf-16le", dammit.original_encoding)

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8")
        # Here's a Windows-1252 document.
        windows_1252 = (
            u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8
        # The document can't be turned into UTF-8:
        self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        self.assertEqual(
            u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
            ):
            input = tricky_unicode_char.encode("utf8")
            self.assertTrue(input.endswith(b'\x93'))
            output = UnicodeDammit.detwingle(input)
            self.assertEqual(output, input)
class TestNamedspacedAttribute(SoupTest):
    """Tests of the NamespacedAttribute string subclass."""
    # NOTE(review): the class name has a typo ("Namedspaced"); kept as-is
    # because renaming would change the public test-class name.
    def test_name_may_be_none(self):
        # With no name, the attribute is just its prefix.
        a = NamespacedAttribute("xmlns", None)
        self.assertEqual(a, "xmlns")
    def test_attribute_is_equivalent_to_colon_separated_string(self):
        a = NamespacedAttribute("a", "b")
        self.assertEqual("a:b", a)
    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        a = NamespacedAttribute("a", "b", "c")
        b = NamespacedAttribute("a", "b", "c")
        self.assertEqual(a, b)
        # The actual namespace is not considered.
        c = NamespacedAttribute("a", "b", None)
        self.assertEqual(a, c)
        # But name and prefix are important.
        d = NamespacedAttribute("a", "z", "c")
        self.assertNotEqual(a, d)
        e = NamespacedAttribute("z", "b", "c")
        self.assertNotEqual(a, e)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    """Tests of the attribute-value wrappers that substitute a new
    charset name when a document is re-encoded."""

    def test_charset_meta_attribute_value(self):
        # Bug fix: this method previously had the same name as the one
        # below, so it was shadowed at class-creation time and never ran.
        value = CharsetMetaAttributeValue("euc-jp")
        self.assertEqual("euc-jp", value)
        self.assertEqual("euc-jp", value.original_value)
        # Re-encoding reports the new charset name.
        self.assertEqual("utf8", value.encode("utf8"))

    def test_content_meta_attribute_value(self):
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        self.assertEqual("text/html; charset=euc-jp", value)
        self.assertEqual("text/html; charset=euc-jp", value.original_value)
        self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Slide-toggle component styles: a label next to a pill-shaped switch
// whose knob slides and whose track fades to a gradient when checked.

// Host and label lay out their contents on one centered row.
:host,
label {
  display: inline-flex;
  gap: 0.5em;
  align-items: center;
}
// Text label next to the switch.
.docs-label {
  font-size: 0.875rem;
  font-style: normal;
  font-weight: 500;
  line-height: 160%; // 1.4rem
  letter-spacing: -0.00875rem;
  color: var(--quaternary-contrast);
}
// Outer pill-shaped track; the real checkbox inside is visually hidden
// but stays in the DOM for accessibility and state.
.docs-toggle {
  position: relative;
  display: inline-block;
  width: 3rem;
  height: 1.5rem;
  border: 1px solid var(--senary-contrast);
  border-radius: 34px;
  input {
    opacity: 0;
    width: 0;
    height: 0;
  }
}
// Clickable surface covering the track; draws both the gradient overlay
// (::before) and the sliding knob (::after).
.docs-slider {
  position: absolute;
  cursor: pointer;
  border-radius: 34px;
  inset: 0;
  background-color: var(--septenary-contrast);
  transition: background-color 0.3s ease, border-color 0.3s ease;
  // background
  &::before {
    content: '';
    position: absolute;
    inset: 0;
    border-radius: 34px;
    background: var(--pink-to-purple-horizontal-gradient);
    opacity: 0;
    transition: opacity 0.3s ease;
  }
  // toggle knob
  &::after {
    position: absolute;
    content: '';
    height: 1.25rem;
    width: 1.25rem;
    left: 0.125rem;
    bottom: 0.125rem;
    background-color: var(--page-background);
    transition: transform 0.3s ease, background-color 0.3s ease;
    border-radius: 50%;
  }
}
// Checked state: fade in the gradient track and slide the knob right.
input {
  &:checked + .docs-slider {
    // background
    &::before {
      opacity: 1;
    }
    // toggle knob
    &::after {
      transform: translateX(1.5rem);
    }
  }
}
|
unknown
|
github
|
https://github.com/angular/angular
|
adev/shared-docs/components/slide-toggle/slide-toggle.component.scss
|
# Copyright (c) Jeroen Van Steirteghem
# See LICENSE
import twunnel.proxy_server
def setDefaultConfiguration(configuration, keys):
    """Fill in default values for the REMOTE_PROXY_SERVER section of
    ``configuration`` in place, after delegating to the shared
    proxy-server defaults.

    Only acts when "REMOTE_PROXY_SERVER" is listed in ``keys``; the set
    of defaults applied depends on the configured server TYPE
    ("SSH", "SSL", "WS" or "WSS").
    """
    twunnel.proxy_server.setDefaultConfiguration(configuration, keys)

    if "REMOTE_PROXY_SERVER" not in keys:
        return

    configuration.setdefault("REMOTE_PROXY_SERVER", {})
    remote = configuration["REMOTE_PROXY_SERVER"]
    remote.setdefault("TYPE", "")
    server_type = remote["TYPE"]

    if server_type == "SSH":
        remote.setdefault("ADDRESS", "")
        remote.setdefault("PORT", 0)
        # Server host key pair.
        remote.setdefault("KEY", {})
        host_key = remote["KEY"]
        host_key.setdefault("PUBLIC", {})
        host_key["PUBLIC"].setdefault("FILE", "")
        host_key["PUBLIC"].setdefault("PASSPHRASE", "")
        host_key.setdefault("PRIVATE", {})
        host_key["PRIVATE"].setdefault("FILE", "")
        host_key["PRIVATE"].setdefault("PASSPHRASE", "")
        # Per-account credentials: password and/or public keys.
        remote.setdefault("ACCOUNTS", [])
        for account in remote["ACCOUNTS"]:
            account.setdefault("NAME", "")
            account.setdefault("PASSWORD", "")
            account.setdefault("KEYS", [])
            for account_key in account["KEYS"]:
                account_key.setdefault("PUBLIC", {})
                account_key["PUBLIC"].setdefault("FILE", "")
                account_key["PUBLIC"].setdefault("PASSPHRASE", "")
            account.setdefault("CONNECTIONS", 0)
    elif server_type == "SSL":
        remote.setdefault("ADDRESS", "")
        remote.setdefault("PORT", 0)
        remote.setdefault("CERTIFICATE", {})
        remote["CERTIFICATE"].setdefault("FILE", "")
        remote["CERTIFICATE"].setdefault("KEY", {})
        remote["CERTIFICATE"]["KEY"].setdefault("FILE", "")
        remote.setdefault("ACCOUNTS", [])
        for account in remote["ACCOUNTS"]:
            account.setdefault("NAME", "")
            account.setdefault("PASSWORD", "")
    elif server_type == "WS":
        remote.setdefault("ADDRESS", "")
        remote.setdefault("PORT", 0)
        remote.setdefault("ACCOUNTS", [])
        for account in remote["ACCOUNTS"]:
            account.setdefault("NAME", "")
            account.setdefault("PASSWORD", "")
    elif server_type == "WSS":
        remote.setdefault("ADDRESS", "")
        remote.setdefault("PORT", 0)
        remote.setdefault("CERTIFICATE", {})
        remote["CERTIFICATE"].setdefault("FILE", "")
        remote["CERTIFICATE"].setdefault("KEY", {})
        remote["CERTIFICATE"]["KEY"].setdefault("FILE", "")
        remote.setdefault("ACCOUNTS", [])
        for account in remote["ACCOUNTS"]:
            account.setdefault("NAME", "")
            account.setdefault("PASSWORD", "")
def createPort(configuration):
    """Create the listening port for the configured remote proxy server.

    Applies configuration defaults first, then dispatches on
    REMOTE_PROXY_SERVER.TYPE. Both "WS" and "WSS" are served by the
    websocket implementation. Returns None for an unknown type.
    """
    setDefaultConfiguration(configuration, ["PROXY_SERVERS", "REMOTE_PROXY_SERVER"])

    server_type = configuration["REMOTE_PROXY_SERVER"]["TYPE"]

    if server_type == "SSH":
        from twunnel.remote_proxy_server__ssh import createSSHPort
        return createSSHPort(configuration)
    if server_type == "SSL":
        from twunnel.remote_proxy_server__ssl import createSSLPort
        return createSSLPort(configuration)
    if server_type in ("WS", "WSS"):
        from twunnel.remote_proxy_server__ws import createWSPort
        return createWSPort(configuration)
    return None
|
unknown
|
codeparrot/codeparrot-clean
| ||
import ts from "dedent";
import * as Path from "pathe";
import * as Pathe from "pathe/utils";
import * as Babel from "../vite/babel";
import type { Context } from "./context";
import * as Params from "./params";
import * as Route from "./route";
import type { RouteManifestEntry } from "../config/routes";
/** A generated file: the absolute path to write and the text to write there. */
export type VirtualFile = { filename: string; content: string };
/** Absolute path of the generated `.react-router/types` directory for this project. */
export function typesDirectory(ctx: Context) {
  const generatedTypesRoot = ".react-router/types";
  return Path.join(ctx.rootDirectory, generatedTypesRoot);
}
// Emit `+future.ts`: augments react-router's `Future` interface with the
// project's configured future flags so flag-dependent types resolve.
export function generateFuture(ctx: Context): VirtualFile {
  const filename = Path.join(typesDirectory(ctx), "+future.ts");
  const content = ts`
    // Generated by React Router

    import "react-router";

    declare module "react-router" {
      interface Future {
        v8_middleware: ${ctx.config.future.v8_middleware}
      }
    }
  `;
  return { filename, content };
}
// Emit `+server-build.d.ts`: declares the `virtual:react-router/server-build`
// module so consumers get typed access to the server build's exports.
export function generateServerBuild(ctx: Context): VirtualFile {
  const filename = Path.join(typesDirectory(ctx), "+server-build.d.ts");
  const content = ts`
    // Generated by React Router

    declare module "virtual:react-router/server-build" {
      import { ServerBuild } from "react-router";

      export const assets: ServerBuild["assets"];
      export const assetsBuildDirectory: ServerBuild["assetsBuildDirectory"];
      export const basename: ServerBuild["basename"];
      export const entry: ServerBuild["entry"];
      export const future: ServerBuild["future"];
      export const isSpaMode: ServerBuild["isSpaMode"];
      export const prerender: ServerBuild["prerender"];
      export const publicPath: ServerBuild["publicPath"];
      export const routeDiscovery: ServerBuild["routeDiscovery"];
      export const routes: ServerBuild["routes"];
      export const ssr: ServerBuild["ssr"];
      export const allowedActionOrigins: ServerBuild["allowedActionOrigins"];
      export const unstable_getCriticalCss: ServerBuild["unstable_getCriticalCss"];
    }
  `;
  return { filename, content };
}
// Babel AST builders, used below to construct the generated type declarations.
const { t } = Babel;
// Generate all route typing files: the single `+routes.ts` module
// augmentation plus one `+types/<route>.ts` annotation file per route
// file that lives inside the app directory.
export function generateRoutes(ctx: Context): Array<VirtualFile> {
  // precompute
  // route file -> ids of the routes registered for that file
  const fileToRoutes = new Map<string, Set<string>>();
  // route id -> ancestry chain ending at that route
  const lineages = new Map<string, Array<RouteManifestEntry>>();
  // every concrete page path any route can match
  const allPages = new Set<string>();
  // route id -> pages whose lineage includes that route
  const routeToPages = new Map<string, Set<string>>();
  for (const route of Object.values(ctx.config.routes)) {
    // fileToRoutes
    let routeIds = fileToRoutes.get(route.file);
    if (!routeIds) {
      routeIds = new Set();
      fileToRoutes.set(route.file, routeIds);
    }
    routeIds.add(route.id);

    // lineages
    const lineage = Route.lineage(ctx.config.routes, route);
    lineages.set(route.id, lineage);

    // pages
    // Pathless routes contribute no pages.
    const fullpath = Route.fullpath(lineage);
    if (!fullpath) continue;
    const pages = expand(fullpath);
    pages.forEach((page) => allPages.add(page));

    // routePages
    lineage.forEach(({ id }) => {
      let routePages = routeToPages.get(id);
      if (!routePages) {
        routePages = new Set<string>();
        routeToPages.set(id, routePages);
      }
      pages.forEach((page) => routePages.add(page));
    });
  }

  // +routes.ts
  const routesTs: VirtualFile = {
    filename: Path.join(typesDirectory(ctx), "+routes.ts"),
    content:
      ts`
        // Generated by React Router

        import "react-router"

        declare module "react-router" {
          interface Register {
            pages: Pages
            routeFiles: RouteFiles
            routeModules: RouteModules
          }
        }
      ` +
      "\n\n" +
      Babel.generate(pagesType(allPages)).code +
      "\n\n" +
      Babel.generate(routeFilesType({ fileToRoutes, routeToPages })).code +
      "\n\n" +
      Babel.generate(routeModulesType(ctx)).code,
  };

  // **/+types/*.ts
  const allAnnotations: Array<VirtualFile> = Array.from(fileToRoutes.entries())
    .filter(([file]) => isInAppDirectory(ctx, file))
    .map(([file, routeIds]) =>
      getRouteAnnotations({ ctx, file, routeIds, lineages }),
    );

  return [routesTs, ...allAnnotations];
}
// Build: `type Pages = { "<page>": { params: { ... } }, ... }`
function pagesType(pages: Set<string>) {
  const members = Array.from(pages).map((page) => {
    const paramsProperty = t.tsPropertySignature(
      t.identifier("params"),
      t.tsTypeAnnotation(paramsType(page)),
    );
    return t.tsPropertySignature(
      t.stringLiteral(page),
      t.tsTypeAnnotation(t.tsTypeLiteral([paramsProperty])),
    );
  });
  return t.tsTypeAliasDeclaration(
    t.identifier("Pages"),
    null,
    t.tsTypeLiteral(members),
  );
}
// Build: `type RouteFiles = { "<file>": { id, page } | ... }`, mapping each
// route file to the route ids it may be registered under, and for each id
// the union of pages that route participates in.
function routeFilesType({
  fileToRoutes,
  routeToPages,
}: {
  fileToRoutes: Map<string, Set<string>>;
  routeToPages: Map<string, Set<string>>;
}) {
  return t.tsTypeAliasDeclaration(
    t.identifier("RouteFiles"),
    null,
    t.tsTypeLiteral(
      Array.from(fileToRoutes).map(([file, routeIds]) =>
        t.tsPropertySignature(
          t.stringLiteral(file),
          t.tsTypeAnnotation(
            t.tsUnionType(
              Array.from(routeIds).map((routeId) => {
                // NOTE(review): `pages` is always truthy because of the
                // `?? new Set()` fallback, so the `tsNeverKeyword` arm
                // below looks unreachable — confirm intent (a pathless
                // route yields an empty union instead of `never`).
                const pages = routeToPages.get(routeId) ?? new Set();
                return t.tsTypeLiteral([
                  t.tsPropertySignature(
                    t.identifier("id"),
                    t.tsTypeAnnotation(
                      t.tsLiteralType(t.stringLiteral(routeId)),
                    ),
                  ),
                  t.tsPropertySignature(
                    t.identifier("page"),
                    t.tsTypeAnnotation(
                      pages
                        ? t.tsUnionType(
                            Array.from(pages).map((page) =>
                              t.tsLiteralType(t.stringLiteral(page)),
                            ),
                          )
                        : t.tsNeverKeyword(),
                    ),
                  ),
                ]);
              }),
            ),
          ),
        ),
      ),
    ),
  );
}
// Build: `type RouteModules = { "<route id>": typeof import("<module>"), ... }`
// Routes outside the app directory cannot be imported and are typed `unknown`.
function routeModulesType(ctx: Context) {
  const members = Object.values(ctx.config.routes).map((route) => {
    const appRelative = Path.relative(ctx.rootDirectory, ctx.config.appDirectory);
    const moduleType = isInAppDirectory(ctx, route.file)
      ? t.tsTypeQuery(
          t.tsImportType(t.stringLiteral(`./${appRelative}/${route.file}`)),
        )
      : t.tsUnknownKeyword();
    return t.tsPropertySignature(
      t.stringLiteral(route.id),
      t.tsTypeAnnotation(moduleType),
    );
  });
  return t.tsTypeAliasDeclaration(
    t.identifier("RouteModules"),
    null,
    t.tsTypeLiteral(members),
  );
}
// True when `routeFile` resolves to a path inside the app directory.
//
// Bug fix: the previous plain `startsWith(appDirectory)` prefix test
// wrongly matched sibling directories sharing a prefix (e.g. `/app`
// matched files in `/app-extra`). Containment is now checked via a
// relative path, which only escapes with a leading `..` (or an absolute
// path on Windows cross-drive cases).
function isInAppDirectory(ctx: Context, routeFile: string): boolean {
  const path = Path.resolve(ctx.config.appDirectory, routeFile);
  const rel = Path.relative(ctx.config.appDirectory, path);
  return rel === "" || (!rel.startsWith("..") && !Path.isAbsolute(rel));
}
// Emit the `+types/<route>.ts` annotation file for one route file: a
// `Matches` union describing every possible ancestry for the route ids
// this file serves, plus a `Route` namespace of typed aliases for the
// route module to import.
function getRouteAnnotations({
  ctx,
  file,
  routeIds,
  lineages,
}: {
  ctx: Context;
  file: string;
  routeIds: Set<string>;
  lineages: Map<string, Array<RouteManifestEntry>>;
}) {
  // Mirror the route file's location under `.react-router/types/.../+types/`.
  const filename = Path.join(
    typesDirectory(ctx),
    Path.relative(ctx.rootDirectory, ctx.config.appDirectory),
    Path.dirname(file),
    "+types",
    Pathe.filename(file) + ".ts",
  );

  // One tuple type per route id; each tuple lists the `{ id, module }`
  // pairs of that route's ancestry.
  const matchesType = t.tsTypeAliasDeclaration(
    t.identifier("Matches"),
    null,
    t.tsUnionType(
      Array.from(routeIds).map((routeId) => {
        const lineage = lineages.get(routeId)!;
        return t.tsTupleType(
          lineage.map((route) =>
            t.tsTypeLiteral([
              t.tsPropertySignature(
                t.identifier("id"),
                t.tsTypeAnnotation(t.tsLiteralType(t.stringLiteral(route.id))),
              ),
              t.tsPropertySignature(
                t.identifier("module"),
                t.tsTypeAnnotation(
                  t.tsTypeQuery(
                    t.tsImportType(
                      t.stringLiteral(
                        // Import specifier relative to where this file
                        // appears after rootDirs mapping.
                        relativeImportSource(
                          rootDirsPath(ctx, filename),
                          Path.resolve(ctx.config.appDirectory, route.file),
                        ),
                      ),
                    ),
                  ),
                ),
              ),
            ]),
          ),
        );
      }),
    ),
  );

  const routeImportSource = relativeImportSource(
    rootDirsPath(ctx, filename),
    Path.resolve(ctx.config.appDirectory, file),
  );
  const content =
    ts`
      // Generated by React Router

      import type { GetInfo, GetAnnotations } from "react-router/internal";

      type Module = typeof import("${routeImportSource}")

      type Info = GetInfo<{
        file: "${file}",
        module: Module
      }>
    ` +
    "\n\n" +
    Babel.generate(matchesType).code +
    "\n\n" +
    ts`
      type Annotations = GetAnnotations<Info & { module: Module, matches: Matches }, ${ctx.rsc}>;

      export namespace Route {
        // links
        export type LinkDescriptors = Annotations["LinkDescriptors"];
        export type LinksFunction = Annotations["LinksFunction"];

        // meta
        export type MetaArgs = Annotations["MetaArgs"];
        export type MetaDescriptors = Annotations["MetaDescriptors"];
        export type MetaFunction = Annotations["MetaFunction"];

        // headers
        export type HeadersArgs = Annotations["HeadersArgs"];
        export type HeadersFunction = Annotations["HeadersFunction"];

        // middleware
        export type MiddlewareFunction = Annotations["MiddlewareFunction"];

        // clientMiddleware
        export type ClientMiddlewareFunction = Annotations["ClientMiddlewareFunction"];

        // loader
        export type LoaderArgs = Annotations["LoaderArgs"];

        // clientLoader
        export type ClientLoaderArgs = Annotations["ClientLoaderArgs"];

        // action
        export type ActionArgs = Annotations["ActionArgs"];

        // clientAction
        export type ClientActionArgs = Annotations["ClientActionArgs"];

        // HydrateFallback
        export type HydrateFallbackProps = Annotations["HydrateFallbackProps"];

        // Component
        export type ComponentProps = Annotations["ComponentProps"];

        // ErrorBoundary
        export type ErrorBoundaryProps = Annotations["ErrorBoundaryProps"];
      }
    `;
  return { filename, content };
}
function relativeImportSource(from: string, to: string) {
let path = Path.relative(Path.dirname(from), to);
let extension = Path.extname(path);
// no extension
path = Path.join(Path.dirname(path), Pathe.filename(path));
if (!path.startsWith("../")) path = "./" + path;
// In typescript, we want to support "moduleResolution": "nodenext" as well as not having "allowImportingTsExtensions": true,
// so we normalize all JS like files to `.js`, but allow other extensions such as `.mdx` and others that might be used as routes.
if (!extension || /\.(js|ts)x?$/.test(extension)) {
extension = ".js";
}
return path + extension;
}
// Map a path under the generated types directory back to its mirror
// location under the project root (tsconfig `rootDirs` pairing).
function rootDirsPath(ctx: Context, typesPath: string): string {
  const relativeToTypes = Path.relative(typesDirectory(ctx), typesPath);
  return Path.join(ctx.rootDirectory, relativeToTypes);
}
function paramsType(path: string) {
const params = Params.parse(path);
return t.tsTypeLiteral(
Object.entries(params).map(([param, isRequired]) => {
const property = t.tsPropertySignature(
t.stringLiteral(param),
t.tsTypeAnnotation(t.tsStringKeyword()),
);
property.optional = !isRequired;
return property;
}),
);
}
// Expand a route fullpath into every concrete page path it can match,
// accounting for optional segments (`seg?` may be omitted; optional
// dynamic params `:param?` are kept, `?` marker included, so paramsType
// can mark them optional).
function expand(fullpath: string): Set<string> {
  const walk = (segments: Array<string>, index: number): Array<string> => {
    if (index === segments.length) return [""];
    const segment = segments[index];
    const optional = segment.endsWith("?");
    const dynamic = segment.startsWith(":");
    // Static segments lose their `?`; dynamic ones keep it.
    const rendered = dynamic ? segment : segment.replace(/\?$/, "");
    const tails = walk(segments, index + 1);
    const included = tails.map((tail) => [rendered, tail].join("/"));
    if (!optional || dynamic) return included;
    // Optional static segment: branch into "without" and "with".
    return [...tails, ...included];
  };

  const results = new Set<string>();
  for (let page of walk(fullpath.split("/"), 0)) {
    if (page !== "/") page = page.replace(/\/$/, "");
    results.add(page);
  }
  return results;
}
|
typescript
|
github
|
https://github.com/remix-run/react-router
|
packages/react-router-dev/typegen/generate.ts
|
from test import support
import unittest
import builtins
import rlcompleter
class CompleteMe:
    """ Trivial class used in testing rlcompleter.Completer. """

    # Single class attribute the completion tests expect to find.
    spam = 1
class TestRlcompleter(unittest.TestCase):
    """Tests for rlcompleter.Completer's namespace handling and its
    global/attribute matching."""

    def setUp(self):
        # One completer bound to builtins, one bound to a custom namespace.
        self.stdcompleter = rlcompleter.Completer()
        self.completer = rlcompleter.Completer(dict(spam=int,
                                                    egg=str,
                                                    CompleteMe=CompleteMe))

        # forces stdcompleter to bind builtins namespace
        self.stdcompleter.complete('', 0)

    def test_namespace(self):
        # Completer accepts any dict (subclass); non-mappings raise.
        class A(dict):
            pass
        class B(list):
            pass

        self.assertTrue(self.stdcompleter.use_main_ns)
        self.assertFalse(self.completer.use_main_ns)
        self.assertFalse(rlcompleter.Completer(A()).use_main_ns)
        self.assertRaises(TypeError, rlcompleter.Completer, B((1,)))

    def test_global_matches(self):
        # test with builtins namespace
        # Matches are suggested with a trailing '('.
        self.assertEqual(sorted(self.stdcompleter.global_matches('di')),
                         [x+'(' for x in dir(builtins) if x.startswith('di')])
        self.assertEqual(sorted(self.stdcompleter.global_matches('st')),
                         [x+'(' for x in dir(builtins) if x.startswith('st')])
        self.assertEqual(self.stdcompleter.global_matches('akaksajadhak'), [])

        # test with a customized namespace
        self.assertEqual(self.completer.global_matches('CompleteM'),
                         ['CompleteMe('])
        self.assertEqual(self.completer.global_matches('eg'),
                         ['egg('])
        # XXX: see issue5256
        self.assertEqual(self.completer.global_matches('CompleteM'),
                         ['CompleteMe('])

    def test_attr_matches(self):
        # test with builtins namespace
        self.assertEqual(self.stdcompleter.attr_matches('str.s'),
                         ['str.{}('.format(x) for x in dir(str)
                          if x.startswith('s')])
        self.assertEqual(self.stdcompleter.attr_matches('tuple.foospamegg'), [])

        # test with a customized namespace
        self.assertEqual(self.completer.attr_matches('CompleteMe.sp'),
                         ['CompleteMe.spam'])
        # Lookup of the base name is case-sensitive.
        self.assertEqual(self.completer.attr_matches('Completeme.egg'), [])

        # Attribute completion follows chained attribute access.
        CompleteMe.me = CompleteMe
        self.assertEqual(self.completer.attr_matches('CompleteMe.me.me.sp'),
                         ['CompleteMe.me.me.spam'])
        self.assertEqual(self.completer.attr_matches('egg.s'),
                         ['egg.{}('.format(x) for x in dir(str)
                          if x.startswith('s')])
def test_main():
    # Entry point for the regression-test harness in the `test` package.
    support.run_unittest(TestRlcompleter)

if __name__ == '__main__':
    test_main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from typing_extensions import Literal
from .response import Response
from ..._models import BaseModel
__all__ = ["ResponseInProgressEvent"]
# NOTE: generated model — change the OpenAPI spec, not this class.
class ResponseInProgressEvent(BaseModel):
    """Emitted when the response is in progress."""

    response: Response
    """The response that is in progress."""

    sequence_number: int
    """The sequence number of this event."""

    type: Literal["response.in_progress"]
    """The type of the event. Always `response.in_progress`."""
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/responses/response_in_progress_event.py
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
"""Test of line navigation output of Firefox on bugzilla's advanced
search page.
"""
from macaroon.playback import *
import utils
sequence = MacroSequence()
########################################################################
# We wait for the focus to be on a blank Firefox window.
#
sequence.append(WaitForWindowActivate(utils.firefoxFrameNames, None))
########################################################################
# Load the local "simple form" test case.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction(utils.htmlURLPrefix + "bugzilla-advanced.html"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
sequence.append(PauseAction(1000))
########################################################################
# Press Orca+Right to get out of the focused entry, then Control+Home
# to move to the top.
#
sequence.append(KeyPressAction(0, None, "KP_Insert"))
sequence.append(KeyComboAction("Right"))
sequence.append(KeyReleaseAction(0, None, "KP_Insert"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.AssertPresentationAction(
"Top of file",
["BRAILLE LINE: 'Home Image Bugzilla New bug · Browse · Search · Reports · Account · Admin · Help Logged In joanmarie.diggs@gmail.com | Log Out'",
" VISIBLE: 'Home Image Bugzilla New bug · Br', cursor=1",
"BRAILLE LINE: 'Home Image Bugzilla New bug · Browse · Search · Reports · Account · Admin · Help Logged In joanmarie.diggs@gmail.com | Log Out'",
" VISIBLE: 'Home Image Bugzilla New bug · Br', cursor=1",
"SPEECH OUTPUT: 'Home link image Bugzilla New bug link · Browse link · Search link · Reports link · Account link · Admin link · Help link Logged In joanmarie.diggs@gmail.com | Log Out link'",
"SPEECH OUTPUT: 'Home link image'"]))
########################################################################
# Down Arrow.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: ' Short Bug Search Form Complicated Bug Search Form '",
" VISIBLE: ' Short Bug Search Form Complicat', cursor=1",
"SPEECH OUTPUT: ' Short Bug Search Form link Complicated Bug Search Form '"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Give me some help \(reloads page.\)'",
" VISIBLE: 'Give me some help \(reloads page.', cursor=1",
"SPEECH OUTPUT: 'Give me some help link \(reloads page.\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Summary: contains all of the words/strings Combo $l Search Button'",
" VISIBLE: 'Summary: contains all of the wor', cursor=1",
"SPEECH OUTPUT: 'Summary: row header contains all of the words/strings combo box text Search button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Classification:'",
" VISIBLE: 'Classification:', cursor=1",
"SPEECH OUTPUT: 'Classification: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Admin List'",
" VISIBLE: 'Admin List', cursor=1",
"SPEECH OUTPUT: 'Admin multi-select List with 8 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Product:'",
" VISIBLE: 'Product:', cursor=1",
"SPEECH OUTPUT: 'Product: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'accerciser List'",
" VISIBLE: 'accerciser List', cursor=1",
"SPEECH OUTPUT: 'accerciser multi-select List with 379 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Component:'",
" VISIBLE: 'Component:', cursor=1",
"SPEECH OUTPUT: 'Component link : column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'abiscan List'",
" VISIBLE: 'abiscan List', cursor=1",
"SPEECH OUTPUT: 'abiscan multi-select List with 1248 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Version:'",
" VISIBLE: 'Version:', cursor=1",
"SPEECH OUTPUT: 'Version: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '0.0.1 List'",
" VISIBLE: '0.0.1 List', cursor=1",
"SPEECH OUTPUT: '0.0.1 multi-select List with 857 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Target Milestone:'",
" VISIBLE: 'Target Milestone:', cursor=1",
"SPEECH OUTPUT: 'Target Milestone: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '--- List'",
" VISIBLE: '--- List', cursor=1",
"SPEECH OUTPUT: '--- multi-select List with 555 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'A Comment: contains the string Combo $l'",
" VISIBLE: 'A Comment: contains the string C', cursor=1",
"SPEECH OUTPUT: 'A Comment: row header contains the string combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Whiteboard: contains all of the words/strings Combo $l'",
" VISIBLE: 'Whiteboard: contains all of the ', cursor=1",
"SPEECH OUTPUT: 'Whiteboard: row header contains all of the words/strings combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Keywords: contains all of the keywords Combo $l'",
" VISIBLE: 'Keywords: contains all of the ke', cursor=1",
"SPEECH OUTPUT: 'Keywords link : row header contains all of the keywords combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Status:'",
" VISIBLE: 'Status:', cursor=1",
"SPEECH OUTPUT: 'Status: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'UNCONFIRMED NEW ASSIGNED REOPENED NEEDINFO List'",
" VISIBLE: 'UNCONFIRMED NEW ASSIGNED REOPENE', cursor=1",
"SPEECH OUTPUT: 'UNCONFIRMED NEW ASSIGNED REOPENED NEEDINFO' voice=uppercase",
"SPEECH OUTPUT: 'multi-select List with 8 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Resolution:'",
" VISIBLE: 'Resolution:', cursor=1",
"SPEECH OUTPUT: 'Resolution: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'FIXED List'",
" VISIBLE: 'FIXED List', cursor=1",
"SPEECH OUTPUT: 'FIXED' voice=uppercase",
"SPEECH OUTPUT: 'multi-select List with 12 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Severity:'",
" VISIBLE: 'Severity:', cursor=1",
"SPEECH OUTPUT: 'Severity: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'blocker List'",
" VISIBLE: 'blocker List', cursor=1",
"SPEECH OUTPUT: 'blocker multi-select List with 7 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Priority:'",
" VISIBLE: 'Priority:', cursor=1",
"SPEECH OUTPUT: 'Priority: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Immediate List'",
" VISIBLE: 'Immediate List', cursor=1",
"SPEECH OUTPUT: 'Immediate multi-select List with 5 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'OS:'",
" VISIBLE: 'OS:', cursor=1",
"SPEECH OUTPUT: 'OS: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'All List'",
" VISIBLE: 'All List', cursor=1",
"SPEECH OUTPUT: 'All multi-select List with 21 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Email and Numbering'",
" VISIBLE: 'Email and Numbering', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Any one of:'",
" VISIBLE: 'Any one of:', cursor=1",
"SPEECH OUTPUT: 'Any one of:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '<x> CheckBox the bug assignee'",
" VISIBLE: '<x> CheckBox the bug assignee', cursor=1",
"SPEECH OUTPUT: 'the bug assignee check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox the reporter'",
" VISIBLE: '< > CheckBox the reporter', cursor=1",
"SPEECH OUTPUT: 'the reporter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox the QA contact'",
" VISIBLE: '< > CheckBox the QA contact', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox a CC list member'",
" VISIBLE: '< > CheckBox a CC list member', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox a commenter'",
" VISIBLE: '< > CheckBox a commenter', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'contains Combo'",
" VISIBLE: 'contains Combo', cursor=1",
"SPEECH OUTPUT: 'contains combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Any one of:'",
" VISIBLE: 'Any one of:', cursor=1",
"SPEECH OUTPUT: 'Any one of:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '<x> CheckBox the bug assignee'",
" VISIBLE: '<x> CheckBox the bug assignee', cursor=1",
"SPEECH OUTPUT: 'the bug assignee check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '<x> CheckBox the reporter'",
" VISIBLE: '<x> CheckBox the reporter', cursor=1",
"SPEECH OUTPUT: 'the reporter check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '<x> CheckBox the QA contact'",
" VISIBLE: '<x> CheckBox the QA contact', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '<x> CheckBox a CC list member'",
" VISIBLE: '<x> CheckBox a CC list member', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox a commenter'",
" VISIBLE: '< > CheckBox a commenter', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'contains Combo'",
" VISIBLE: 'contains Combo', cursor=1",
"SPEECH OUTPUT: 'contains combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Only include Combo bugs numbered: $l'",
" VISIBLE: 'Only include Combo bugs numbered', cursor=1",
"SPEECH OUTPUT: 'Only include combo box bugs numbered: text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '\(comma-separated list\)'",
" VISIBLE: '\(comma-separated list\)', cursor=1",
"SPEECH OUTPUT: '\(comma-separated list\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Bug Changes'",
" VISIBLE: 'Bug Changes', cursor=1",
"SPEECH OUTPUT: 'Bug Changes'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '•Only bugs changed between:'",
" VISIBLE: '•Only bugs changed between:', cursor=1",
"SPEECH OUTPUT: '•Only bugs changed between:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BUG? - As with label guess, we're not guessing this label correctly",
"BRAILLE LINE: ' $l and Now $l'",
" VISIBLE: ' $l and Now $l', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering text and text Now ",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '\(YYYY-MM-DD or relative dates\)'",
" VISIBLE: '\(YYYY-MM-DD or relative dates\)', cursor=1",
"SPEECH OUTPUT: '\(YYYY-MM-DD or relative dates\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '•where one or more of the following changed:'",
" VISIBLE: '•where one or more of the follow', cursor=1",
"SPEECH OUTPUT: '•where one or more of the following changed:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '[Bug creation] List'",
" VISIBLE: '[Bug creation] List', cursor=1",
"SPEECH OUTPUT: '[Bug creation] multi-select List with 26 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '•and the new value was:'",
" VISIBLE: '•and the new value was:', cursor=1",
"SPEECH OUTPUT: '•and the new value was:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'GNOME version:'",
" VISIBLE: 'GNOME version:', cursor=1",
"SPEECH OUTPUT: 'GNOME version: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Unspecified List'",
" VISIBLE: 'Unspecified List', cursor=1",
"SPEECH OUTPUT: 'Unspecified multi-select List with 14 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'GNOME target:'",
" VISIBLE: 'GNOME target:', cursor=1",
"SPEECH OUTPUT: 'GNOME target: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Unspecified List'",
" VISIBLE: 'Unspecified List', cursor=1",
"SPEECH OUTPUT: 'Unspecified multi-select List with 12 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Sort results by: Reuse same sort as last time Combo'",
" VISIBLE: 'Sort results by: Reuse same sort', cursor=1",
"SPEECH OUTPUT: 'Sort results by: Reuse same sort as last time combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Search Button'",
" VISIBLE: 'Search Button', cursor=1",
"SPEECH OUTPUT: 'Search button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: ' < > CheckBox and remember these as my default search options'",
" VISIBLE: ' < > CheckBox and remember th', cursor=1",
"SPEECH OUTPUT: ' check box not checked and remember these as my default search options'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Advanced Searching Using Boolean Charts:'",
" VISIBLE: 'Advanced Searching Using Boolean', cursor=1",
"SPEECH OUTPUT: 'Advanced Searching Using Boolean Charts:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '< > CheckBox Not \(negate this whole chart\)'",
" VISIBLE: '< > CheckBox Not \(negate this wh', cursor=1",
"SPEECH OUTPUT: 'Not (negate this whole chart) check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: '--- Combo --- Combo $l Or Button'",
" VISIBLE: '--- Combo --- Combo $l Or Butto', cursor=1",
"SPEECH OUTPUT: '--- combo box --- combo box text Or button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'And Button Add another boolean chart Button '",
" VISIBLE: 'And Button Add another boo', cursor=1",
"SPEECH OUTPUT: 'And button Add another boolean chart button '"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"Line Down",
["BRAILLE LINE: 'Saved Searches: My Bugs and Patches | All Orca | Firefox | open orca | Open RFEs'",
" VISIBLE: 'Saved Searches: My Bugs and Patc', cursor=1",
"SPEECH OUTPUT: 'Saved Searches: My Bugs and Patches link | All Orca link | Firefox link | open orca link | Open RFEs link'"]))
########################################################################
# Up Arrow.
#
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'And Button Add another boolean chart Button '",
" VISIBLE: 'And Button Add another boo', cursor=1",
"SPEECH OUTPUT: 'And button Add another boolean chart button '"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '--- Combo --- Combo $l Or Button'",
" VISIBLE: '--- Combo --- Combo $l Or Butto', cursor=1",
"SPEECH OUTPUT: '--- combo box --- combo box text Or button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox Not \(negate this whole chart\)'",
" VISIBLE: '< > CheckBox Not \(negate this wh', cursor=1",
"SPEECH OUTPUT: 'Not \(negate this whole chart\) check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Advanced Searching Using Boolean Charts:'",
" VISIBLE: 'Advanced Searching Using Boolean', cursor=1",
"SPEECH OUTPUT: 'Advanced Searching Using Boolean Charts:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: ' < > CheckBox and remember these as my default search options'",
" VISIBLE: ' < > CheckBox and remember th', cursor=1",
"SPEECH OUTPUT: ' check box not checked and remember these as my default search options'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Search Button'",
" VISIBLE: 'Search Button', cursor=1",
"SPEECH OUTPUT: 'Search button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Sort results by: Reuse same sort as last time Combo'",
" VISIBLE: 'Sort results by: Reuse same sort', cursor=1",
"SPEECH OUTPUT: 'Sort results by: Reuse same sort as last time combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Unspecified List'",
" VISIBLE: 'Unspecified List', cursor=1",
"SPEECH OUTPUT: 'Unspecified multi-select List with 12 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'GNOME target:'",
" VISIBLE: 'GNOME target:', cursor=1",
"SPEECH OUTPUT: 'GNOME target: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Unspecified List'",
" VISIBLE: 'Unspecified List', cursor=1",
"SPEECH OUTPUT: 'Unspecified multi-select List with 14 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'GNOME version:'",
" VISIBLE: 'GNOME version:', cursor=1",
"SPEECH OUTPUT: 'GNOME version: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering text'",]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '•and the new value was:'",
" VISIBLE: '•and the new value was:', cursor=1",
"SPEECH OUTPUT: '•and the new value was:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '[Bug creation] List'",
" VISIBLE: '[Bug creation] List', cursor=1",
"SPEECH OUTPUT: '[Bug creation] multi-select List with 26 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '•where one or more of the following changed:'",
" VISIBLE: '•where one or more of the follow', cursor=1",
"SPEECH OUTPUT: '•where one or more of the following changed:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '\(YYYY-MM-DD or relative dates\)'",
" VISIBLE: '\(YYYY-MM-DD or relative dates\)', cursor=1",
"SPEECH OUTPUT: '\(YYYY-MM-DD or relative dates\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BUG? - As with label guess, we're not guessing this label correctly",
"BRAILLE LINE: ' $l and Now $l'",
" VISIBLE: ' $l and Now $l', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering text and text Now ",
"'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '•Only bugs changed between:'",
" VISIBLE: '•Only bugs changed between:', cursor=1",
"SPEECH OUTPUT: '•Only bugs changed between:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Bug Changes'",
" VISIBLE: 'Bug Changes', cursor=1",
"SPEECH OUTPUT: 'Bug Changes'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '\(comma-separated list\)'",
" VISIBLE: '\(comma-separated list\)', cursor=1",
"SPEECH OUTPUT: '\(comma-separated list\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Only include Combo bugs numbered: $l'",
" VISIBLE: 'Only include Combo bugs numbered', cursor=1",
"SPEECH OUTPUT: 'Only include combo box bugs numbered: text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'contains Combo'",
" VISIBLE: 'contains Combo', cursor=1",
"SPEECH OUTPUT: 'contains combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox a commenter'",
" VISIBLE: '< > CheckBox a commenter', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '<x> CheckBox a CC list member'",
" VISIBLE: '<x> CheckBox a CC list member', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '<x> CheckBox the QA contact'",
" VISIBLE: '<x> CheckBox the QA contact', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '<x> CheckBox the reporter'",
" VISIBLE: '<x> CheckBox the reporter', cursor=1",
"SPEECH OUTPUT: 'the reporter check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '<x> CheckBox the bug assignee'",
" VISIBLE: '<x> CheckBox the bug assignee', cursor=1",
"SPEECH OUTPUT: 'the bug assignee check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Any one of:'",
" VISIBLE: 'Any one of:', cursor=1",
"SPEECH OUTPUT: 'Any one of:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: ' $l'",
" VISIBLE: ' $l', cursor=1",
"SPEECH OUTPUT: 'text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'contains Combo'",
" VISIBLE: 'contains Combo', cursor=1",
"SPEECH OUTPUT: 'contains combo box'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox a commenter'",
" VISIBLE: '< > CheckBox a commenter', cursor=1",
"SPEECH OUTPUT: 'a commenter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox a CC list member'",
" VISIBLE: '< > CheckBox a CC list member', cursor=1",
"SPEECH OUTPUT: 'a CC list member check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox the QA contact'",
" VISIBLE: '< > CheckBox the QA contact', cursor=1",
"SPEECH OUTPUT: 'the QA contact check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '< > CheckBox the reporter'",
" VISIBLE: '< > CheckBox the reporter', cursor=1",
"SPEECH OUTPUT: 'the reporter check box not checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '<x> CheckBox the bug assignee'",
" VISIBLE: '<x> CheckBox the bug assignee', cursor=1",
"SPEECH OUTPUT: 'the bug assignee check box checked'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Any one of:'",
" VISIBLE: 'Any one of:', cursor=1",
"SPEECH OUTPUT: 'Any one of:'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Email and Numbering'",
" VISIBLE: 'Email and Numbering', cursor=1",
"SPEECH OUTPUT: 'Email and Numbering'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'All List'",
" VISIBLE: 'All List', cursor=1",
"SPEECH OUTPUT: 'All multi-select List with 21 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'OS:'",
" VISIBLE: 'OS:', cursor=1",
"SPEECH OUTPUT: 'OS: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Immediate List'",
" VISIBLE: 'Immediate List', cursor=1",
"SPEECH OUTPUT: 'Immediate multi-select List with 5 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Priority:'",
" VISIBLE: 'Priority:', cursor=1",
"SPEECH OUTPUT: 'Priority: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'blocker List'",
" VISIBLE: 'blocker List', cursor=1",
"SPEECH OUTPUT: 'blocker multi-select List with 7 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Severity:'",
" VISIBLE: 'Severity:', cursor=1",
"SPEECH OUTPUT: 'Severity: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'FIXED List'",
" VISIBLE: 'FIXED List', cursor=1",
"SPEECH OUTPUT: 'FIXED' voice=uppercase",
"SPEECH OUTPUT: 'multi-select List with 12 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Resolution:'",
" VISIBLE: 'Resolution:', cursor=1",
"SPEECH OUTPUT: 'Resolution: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'UNCONFIRMED NEW ASSIGNED REOPENED NEEDINFO List'",
" VISIBLE: 'UNCONFIRMED NEW ASSIGNED REOPENE', cursor=1",
"SPEECH OUTPUT: 'UNCONFIRMED NEW ASSIGNED REOPENED NEEDINFO' voice=uppercase",
"SPEECH OUTPUT: 'multi-select List with 8 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Status:'",
" VISIBLE: 'Status:', cursor=1",
"SPEECH OUTPUT: 'Status: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Separator'",
" VISIBLE: 'Separator', cursor=1",
"SPEECH OUTPUT: 'separator'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Keywords: contains all of the keywords Combo $l'",
" VISIBLE: 'Keywords: contains all of the ke', cursor=1",
"SPEECH OUTPUT: 'Keywords link : row header contains all of the keywords combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Whiteboard: contains all of the words/strings Combo $l'",
" VISIBLE: 'Whiteboard: contains all of the ', cursor=1",
"SPEECH OUTPUT: 'Whiteboard: row header contains all of the words/strings combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'A Comment: contains the string Combo $l'",
" VISIBLE: 'A Comment: contains the string C', cursor=1",
"SPEECH OUTPUT: 'A Comment: row header contains the string combo box text'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '--- List'",
" VISIBLE: '--- List', cursor=1",
"SPEECH OUTPUT: '--- multi-select List with 555 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Target Milestone:'",
" VISIBLE: 'Target Milestone:', cursor=1",
"SPEECH OUTPUT: 'Target Milestone: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: '0.0.1 List'",
" VISIBLE: '0.0.1 List', cursor=1",
"SPEECH OUTPUT: '0.0.1 multi-select List with 857 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Version:'",
" VISIBLE: 'Version:', cursor=1",
"SPEECH OUTPUT: 'Version: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'abiscan List'",
" VISIBLE: 'abiscan List', cursor=1",
"SPEECH OUTPUT: 'abiscan multi-select List with 1248 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Component:'",
" VISIBLE: 'Component:', cursor=1",
"SPEECH OUTPUT: 'Component link : column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'accerciser List'",
" VISIBLE: 'accerciser List', cursor=1",
"SPEECH OUTPUT: 'accerciser multi-select List with 379 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Product:'",
" VISIBLE: 'Product:', cursor=1",
"SPEECH OUTPUT: 'Product: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Admin List'",
" VISIBLE: 'Admin List', cursor=1",
"SPEECH OUTPUT: 'Admin multi-select List with 8 items'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Classification:'",
" VISIBLE: 'Classification:', cursor=1",
"SPEECH OUTPUT: 'Classification: column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Summary: contains all of the words/strings Combo $l Search Button'",
" VISIBLE: 'Summary: contains all of the wor', cursor=1",
"SPEECH OUTPUT: 'Summary: row header contains all of the words/strings combo box text Search button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'Give me some help \(reloads page.\)'",
" VISIBLE: 'Give me some help \(reloads page.', cursor=1",
"SPEECH OUTPUT: 'Give me some help link \(reloads page.\)'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: ' Short Bug Search Form Complicated Bug Search Form '",
" VISIBLE: ' Short Bug Search Form Complicat', cursor=1",
"SPEECH OUTPUT: ' Short Bug Search Form link Complicated Bug Search Form '"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"Line Up",
["BRAILLE LINE: 'New bug · Browse · Search · Reports · Account · Admin · Help Logged In joanmarie.diggs@gmail.com | Log Out'",
" VISIBLE: 'New bug · Browse · Search · Repo', cursor=1",
"SPEECH OUTPUT: 'New bug link · Browse link · Search link · Reports link · Account link · Admin link · Help link Logged In joanmarie.diggs@gmail.com | Log Out link'"]))
########################################################################
# Move to the location bar by pressing Control+L. When it has focus
# type "about:blank" and press Return to restore the browser to the
# conditions at the test's start.
#
sequence.append(KeyComboAction("<Control>l"))
sequence.append(WaitForFocus(acc_role=pyatspi.ROLE_ENTRY))
sequence.append(TypeAction("about:blank"))
sequence.append(KeyComboAction("Return"))
sequence.append(WaitForDocLoad())
# Just a little extra wait to let some events get through.
#
sequence.append(PauseAction(3000))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import ngraph as ng
from test.ngraph.util import get_runtime
def test_elu_operator_with_scalar_and_array():
    """ELU over a constant input array with a scalar alpha gives the reference values."""
    runtime = get_runtime()

    input_array = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
    alpha = np.float32(3)

    # The array is passed straight into the op (constant folding path),
    # so the computation takes no runtime arguments.
    node = ng.elu(input_array, alpha)
    result = runtime.computation(node)()

    # alpha * (exp(x) - 1) for x < 0, identity for x >= 0.
    reference = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)
    assert np.allclose(result, reference)
def test_elu_operator_with_scalar():
    """ELU built over a graph parameter (fed at call time) gives the reference values."""
    runtime = get_runtime()

    input_array = np.array([[-5, 1], [-2, 3]], dtype=np.float32)
    alpha = np.float32(3)

    # Unlike the constant-input variant, the data flows in through a
    # named graph parameter and is supplied when the computation runs.
    parameter_data = ng.parameter([2, 2], name="Data", dtype=np.float32)
    node = ng.elu(parameter_data, alpha)

    result = runtime.computation(node, parameter_data)(input_array)

    reference = np.array([[-2.9797862, 1.0], [-2.5939941, 3.0]], dtype=np.float32)
    assert np.allclose(result, reference)
def test_fake_quantize():
    """FakeQuantize with scalar bounds: 24 inputs quantized to 4 levels.

    Fix: the reference tensor previously carried one spurious extra nesting
    level, giving it shape (1, 1, 2, 3, 4) while the result has shape
    (1, 2, 3, 4); the comparison only passed because np.allclose broadcasts.
    The extra dimension has been removed so the shapes match exactly.
    """
    runtime = get_runtime()

    data_value = np.arange(24.0, dtype=np.float32).reshape(1, 2, 3, 4)
    input_low_value = np.float32(0)
    input_high_value = np.float32(23)
    output_low_value = np.float32(2)
    output_high_value = np.float32(16)
    levels = np.float32(4)

    data_shape = [1, 2, 3, 4]
    bound_shape = []  # the four range bounds are scalars
    parameter_data = ng.parameter(data_shape, name="data", dtype=np.float32)
    parameter_input_low = ng.parameter(bound_shape, name="input_low", dtype=np.float32)
    parameter_input_high = ng.parameter(bound_shape, name="input_high", dtype=np.float32)
    parameter_output_low = ng.parameter(bound_shape, name="output_low", dtype=np.float32)
    parameter_output_high = ng.parameter(bound_shape, name="output_high", dtype=np.float32)

    model = ng.fake_quantize(
        parameter_data,
        parameter_input_low,
        parameter_input_high,
        parameter_output_low,
        parameter_output_high,
        levels,
    )
    computation = runtime.computation(
        model,
        parameter_data,
        parameter_input_low,
        parameter_input_high,
        parameter_output_low,
        parameter_output_high,
    )
    result = computation(
        data_value, input_low_value, input_high_value, output_low_value, output_high_value
    )

    expected = np.array(
        [
            [
                [
                    [2.0, 2.0, 2.0, 2.0],
                    [6.6666669, 6.6666669, 6.6666669, 6.6666669],
                    [6.6666669, 6.6666669, 6.6666669, 6.6666669],
                ],
                [
                    [11.33333301, 11.33333301, 11.33333301, 11.33333301],
                    [11.33333301, 11.33333301, 11.33333301, 11.33333301],
                    [16.0, 16.0, 16.0, 16.0],
                ],
            ]
        ],
        dtype=np.float32,
    )
    assert np.allclose(result, expected)
def test_depth_to_space():
    """DepthToSpace (blocks_first): 4 channels folded into 2x2 spatial blocks."""
    runtime = get_runtime()
    input_tensor = np.array(
        [
            [
                [[0, 1, 2], [3, 4, 5]],
                [[6, 7, 8], [9, 10, 11]],
                [[12, 13, 14], [15, 16, 17]],
                [[18, 19, 20], [21, 22, 23]],
            ]
        ],
        dtype=np.float32,
    )
    param = ng.parameter([1, 4, 2, 3], name="Data", dtype=np.float32)
    node = ng.depth_to_space(param, "blocks_first", np.float32(2))
    actual = runtime.computation(node, param)(input_tensor)

    reference = np.array(
        [
            [
                [
                    [0, 6, 1, 7, 2, 8],
                    [12, 18, 13, 19, 14, 20],
                    [3, 9, 4, 10, 5, 11],
                    [15, 21, 16, 22, 17, 23],
                ]
            ]
        ],
        dtype=np.float32,
    )
    assert np.allclose(actual, reference)
def test_space_to_batch():
    """SpaceToBatch: (1, 2, 2, 3) input, block shape (1, 2, 3, 2), asymmetric pads."""
    runtime = get_runtime()
    input_tensor = np.array(
        [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32
    )
    blocks = np.array([1, 2, 3, 2], dtype=np.int64)
    pad_lo = np.array([0, 0, 1, 0], dtype=np.int64)
    pad_hi = np.array([0, 0, 0, 1], dtype=np.int64)

    param = ng.parameter(input_tensor.shape, name="Data", dtype=np.float32)
    node = ng.space_to_batch(param, blocks, pad_lo, pad_hi)
    actual = runtime.computation(node, param)(input_tensor)

    # Zeros come from the padding; the batch dimension absorbs the blocks.
    reference = np.array(
        [
            [[[0, 0]]],
            [[[0, 0]]],
            [[[0, 2]]],
            [[[1, 0]]],
            [[[3, 5]]],
            [[[4, 0]]],
            [[[0, 0]]],
            [[[0, 0]]],
            [[[6, 8]]],
            [[[7, 0]]],
            [[[9, 11]]],
            [[[10, 0]]],
        ],
        dtype=np.float32,
    )
    assert np.allclose(actual, reference)
def test_batch_to_space():
    """BatchToSpace: the exact inverse of the SpaceToBatch case above."""
    runtime = get_runtime()
    batched = np.array(
        [
            [[[0, 0]]],
            [[[0, 0]]],
            [[[0, 2]]],
            [[[1, 0]]],
            [[[3, 5]]],
            [[[4, 0]]],
            [[[0, 0]]],
            [[[0, 0]]],
            [[[6, 8]]],
            [[[7, 0]]],
            [[[9, 11]]],
            [[[10, 0]]],
        ],
        dtype=np.float32,
    )
    blocks = np.array([1, 2, 3, 2], dtype=np.int64)
    crop_lo = np.array([0, 0, 1, 0], dtype=np.int64)
    crop_hi = np.array([0, 0, 0, 1], dtype=np.int64)

    param = ng.parameter(batched.shape, name="Data", dtype=np.float32)
    node = ng.batch_to_space(param, blocks, crop_lo, crop_hi)
    actual = runtime.computation(node, param)(batched)

    reference = np.array(
        [[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]]], dtype=np.float32
    )
    assert np.allclose(actual, reference)
def test_gelu_operator_with_parameters():
    """GELU on a graph parameter; loose tolerances absorb the approximation error."""
    runtime = get_runtime()
    input_values = np.array([[-5, 1], [-2, 3]], dtype=np.float32)

    param = ng.parameter([2, 2], name="Data", dtype=np.float32)
    actual = runtime.computation(ng.gelu(param), param)(input_values)

    reference = np.array(
        [[-1.4901161e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32
    )
    assert np.allclose(actual, reference, 0.007, 0.007)
def test_gelu_operator_with_array():
    """GELU applied directly to a constant array (no parameter node)."""
    runtime = get_runtime()
    input_values = np.array([[-5, 1], [-2, 3]], dtype=np.float32)

    actual = runtime.computation(ng.gelu(input_values))()

    reference = np.array(
        [[-1.4901161e-06, 8.4134471e-01], [-4.5500278e-02, 2.9959502]], dtype=np.float32
    )
    assert np.allclose(actual, reference, 0.007, 0.007)
def test_clamp_operator():
    """Clamp on a parameter node must agree with np.clip."""
    runtime = get_runtime()
    lo = np.float32(3)
    hi = np.float32(12)

    param = ng.parameter([2, 2], name="Data", dtype=np.float32)
    computation = runtime.computation(ng.clamp(param, lo, hi), param)

    input_values = np.array([[-5, 9], [45, 3]], dtype=np.float32)
    assert np.allclose(computation(input_values), np.clip(input_values, lo, hi))
def test_clamp_operator_with_array():
    """Clamp applied directly to a constant array must agree with np.clip."""
    runtime = get_runtime()
    input_values = np.array([[-5, 9], [45, 3]], dtype=np.float32)
    lo = np.float32(3)
    hi = np.float32(12)

    actual = runtime.computation(ng.clamp(input_values, lo, hi))()
    assert np.allclose(actual, np.clip(input_values, lo, hi))
def test_squeeze_operator():
    """Squeeze removes the singleton axes 2 and 4, leaving shape (1, 2, 3, 1)."""
    runtime = get_runtime()
    shape = [1, 2, 1, 3, 1, 1]
    input_values = np.arange(6.0, dtype=np.float32).reshape(1, 2, 1, 3, 1, 1)

    param = ng.parameter(shape, name="Data", dtype=np.float32)
    actual = runtime.computation(ng.squeeze(param, [2, 4]), param)(input_values)

    reference = np.arange(6.0, dtype=np.float32).reshape(1, 2, 3, 1)
    assert np.allclose(actual, reference)
def test_squared_difference_operator():
    """SquaredDifference broadcasts a (2, 3, 4) operand against (1, 2, 3, 4)."""
    runtime = get_runtime()
    lhs_shape = [1, 2, 3, 4]
    rhs_shape = [2, 3, 4]

    lhs = ng.parameter(lhs_shape, name="x1", dtype=np.float32)
    rhs = ng.parameter(rhs_shape, name="x2", dtype=np.float32)
    lhs_values = np.arange(24.0, dtype=np.float32).reshape(lhs_shape)
    rhs_values = np.arange(start=4.0, stop=28.0, step=1.0, dtype=np.float32).reshape(rhs_shape)

    node = ng.squared_difference(lhs, rhs)
    actual = runtime.computation(node, lhs, rhs)(lhs_values, rhs_values)

    assert np.allclose(actual, np.square(np.subtract(lhs_values, rhs_values)))
@pytest.mark.skip_on_cpu
@pytest.mark.skip_on_interpreter
def test_shuffle_channels_operator():
    """ShuffleChannels: 15 channels split into 5 groups of 3 and interleaved."""
    runtime = get_runtime()
    data_shape = [1, 15, 2, 2]
    axis = 1       # shuffle along the channel dimension
    groups = 5
    parameter = ng.parameter(data_shape, name="Data", dtype=np.float32)
    data_value = np.arange(60.0, dtype=np.float32).reshape(data_shape)
    model = ng.shuffle_channels(parameter, axis, groups)
    computation = runtime.computation(model, parameter)
    result = computation(data_value)
    # Reference corresponds to viewing the channels as (groups, 15/groups),
    # transposing, and flattening back — hence the strided channel order.
    expected = np.array(
        [
            [
                [[0.0, 1.0], [2.0, 3.0]],
                [[12.0, 13.0], [14.0, 15.0]],
                [[24.0, 25.0], [26.0, 27.0]],
                [[36.0, 37.0], [38.0, 39.0]],
                [[48.0, 49.0], [50.0, 51.0]],
                [[4.0, 5.0], [6.0, 7.0]],
                [[16.0, 17.0], [18.0, 19.0]],
                [[28.0, 29.0], [30.0, 31.0]],
                [[40.0, 41.0], [42.0, 43.0]],
                [[52.0, 53.0], [54.0, 55.0]],
                [[8.0, 9.0], [10.0, 11.0]],
                [[20.0, 21.0], [22.0, 23.0]],
                [[32.0, 33.0], [34.0, 35.0]],
                [[44.0, 45.0], [46.0, 47.0]],
                [[56.0, 57.0], [58.0, 59.0]],
            ]
        ],
        dtype=np.float32,
    )
    assert np.allclose(result, expected)
def test_unsqueeze():
    """Unsqueeze inserts singleton axes at positions 0 and 4."""
    runtime = get_runtime()
    input_values = np.arange(60.0, dtype=np.float32).reshape(3, 4, 5)

    param = ng.parameter([3, 4, 5], name="Data", dtype=np.float32)
    actual = runtime.computation(ng.unsqueeze(param, [0, 4]), param)(input_values)

    reference = np.arange(60.0, dtype=np.float32).reshape(1, 3, 4, 5, 1)
    assert np.allclose(actual, reference)
def test_grn_operator():
    """GRN (global response normalization) on a (1, 2, 3, 4) input."""
    runtime = get_runtime()
    data_value = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(1, 2, 3, 4)
    bias = np.float32(1e-6)  # small bias term passed to GRN (presumably for numerical stability)
    data_shape = [1, 2, 3, 4]
    parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32)
    model = ng.grn(parameter_data, bias)
    computation = runtime.computation(model, parameter_data)
    result = computation(data_value)
    # Precomputed reference values for the normalized tensor.
    expected = np.array(
        [
            [
                [
                    [0.0766965, 0.14142136, 0.19611613, 0.24253564],
                    [0.28216633, 0.31622776, 0.34570536, 0.37139067],
                    [0.39391932, 0.41380295, 0.4314555, 0.4472136],
                ],
                [
                    [0.9970545, 0.98994946, 0.9805807, 0.97014254],
                    [0.9593655, 0.9486833, 0.9383431, 0.9284767],
                    [0.91914505, 0.9103665, 0.9021342, 0.8944272],
                ],
            ]
        ],
        dtype=np.float32,
    )
    assert np.allclose(result, expected)
def test_prelu_operator():
    """PReLU with a (2, 3, 1) negative-slope tensor broadcast over the data."""
    runtime = get_runtime()
    data_shape = [1, 2, 3, 4]
    slope_shape = [2, 3, 1]
    input_values = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape)
    slope_values = np.arange(start=-10.0, stop=-4.0, dtype=np.float32).reshape(slope_shape)

    data_param = ng.parameter(data_shape, name="Data", dtype=np.float32)
    slope_param = ng.parameter(slope_shape, name="Slope", dtype=np.float32)
    node = ng.prelu(data_param, slope_param)
    actual = runtime.computation(node, data_param, slope_param)(input_values, slope_values)

    # PReLU(x) = x for x > 0, slope * x otherwise.
    reference = (
        np.clip(input_values, 0, np.inf) + np.clip(input_values, -np.inf, 0) * slope_values
    )
    assert np.allclose(actual, reference)
def test_selu_operator():
    """SELU with explicit alpha and lambda constants."""
    runtime = get_runtime()
    data_shape = [4, 2, 3, 1]
    input_values = np.arange(start=1.0, stop=25.0, dtype=np.float32).reshape(data_shape)
    alpha = np.array(1.6733, dtype=np.float32)
    scale = np.array(1.0507, dtype=np.float32)

    param = ng.parameter(data_shape, name="Data", dtype=np.float32)
    actual = runtime.computation(ng.selu(param, alpha, scale), param)(input_values)

    # SELU(x) = lambda * (x if x > 0 else alpha * (exp(x) - 1))
    reference = scale * (
        (input_values > 0) * input_values
        + (input_values <= 0) * (alpha * np.exp(input_values) - alpha)
    )
    assert np.allclose(actual, reference)
def test_hard_sigmoid_operator():
    """HardSigmoid with alpha=0.5, beta=0.6 fed through scalar parameter nodes."""
    runtime = get_runtime()
    alpha = np.float32(0.5)
    beta = np.float32(0.6)
    input_values = np.array([-1, 0, 1], dtype=np.float32)

    data_param = ng.parameter([3], name="Data", dtype=np.float32)
    alpha_param = ng.parameter([], name="Alpha", dtype=np.float32)
    beta_param = ng.parameter([], name="Beta", dtype=np.float32)
    node = ng.hard_sigmoid(data_param, alpha_param, beta_param)

    actual = runtime.computation(node, data_param, alpha_param, beta_param)(
        input_values, alpha, beta
    )
    assert np.allclose(actual, [0.1, 0.6, 1.0])
def test_mvn_operator():
    """MVN across channels with variance normalization on a (3, 3, 3, 1) input."""
    runtime = get_runtime()
    data_shape = [3, 3, 3, 1]
    across_channels = True
    normalize_variance = True
    eps = np.float32(1e-9)  # epsilon passed to MVN (presumably guards the variance division)
    data_value = np.array(
        [
            [
                [[0.8439683], [0.5665144], [0.05836735]],
                [[0.02916367], [0.12964272], [0.5060197]],
                [[0.79538304], [0.9411346], [0.9546573]],
            ],
            [
                [[0.17730942], [0.46192095], [0.26480448]],
                [[0.6746842], [0.01665257], [0.62473077]],
                [[0.9240844], [0.9722341], [0.11965699]],
            ],
            [
                [[0.41356155], [0.9129373], [0.59330076]],
                [[0.81929934], [0.7862604], [0.11799799]],
                [[0.69248444], [0.54119414], [0.07513223]],
            ],
        ],
        dtype=np.float32,
    )
    parameter_data = ng.parameter(data_shape, name="Data", dtype=np.float32)
    model = ng.mvn(parameter_data, across_channels, normalize_variance, eps)
    computation = runtime.computation(model, parameter_data)
    result = computation(data_value)
    # NOTE(review): no dtype here, so the reference is float64; np.allclose
    # upcasts the float32 result, which keeps the comparison valid.
    expected = np.array(
        [
            [
                [[0.9951074], [0.14548765], [-1.410561]],
                [[-1.4999886], [-1.1923014], [-0.03975919]],
                [[0.8463296], [1.2926502], [1.3340596]],
            ],
            [
                [[-1.0463363], [-0.1747985], [-0.7784088]],
                [[0.47672555], [-1.5383], [0.32375798]],
                [[1.2404392], [1.3878832], [-1.2228798]],
            ],
            [
                [[-0.3228847], [1.2063044], [0.22751297]],
                [[0.91956615], [0.81839436], [-1.2279599]],
                [[0.5312334], [0.067952], [-1.3592235]],
            ],
        ],
    )
    assert np.allclose(result, expected)
def test_space_to_depth_operator():
    """SpaceToDepth (blocks_first) with block size 2 on a (1, 2, 4, 4) input."""
    runtime = get_runtime()
    data_shape = [1, 2, 4, 4]
    input_values = np.arange(start=0, stop=32, step=1.0, dtype=np.float32).reshape(data_shape)

    param = ng.parameter(data_shape, name="Data", dtype=np.float32)
    node = ng.space_to_depth(param, "blocks_first", 2)
    actual = runtime.computation(node, param)(input_values)

    # Each 2x2 spatial block is folded into the channel dimension.
    reference = np.array(
        [
            0, 2, 8, 10, 16, 18, 24, 26,
            1, 3, 9, 11, 17, 19, 25, 27,
            4, 6, 12, 14, 20, 22, 28, 30,
            5, 7, 13, 15, 21, 23, 29, 31,
        ],
        dtype=np.float32,
    ).reshape(1, 8, 2, 2)
    assert np.allclose(actual, reference)
@pytest.mark.skip_on_cpu
def test_rnn_cell_operator():
    """Single RNNCell step (sigmoid activation, clip=2.88) against precomputed values."""
    runtime = get_runtime()
    batch_size = 2
    input_size = 3
    hidden_size = 3
    # Shapes follow the RNNCell convention: X (batch, input), H_t (batch, hidden),
    # W (hidden, input), R (hidden, hidden), B (hidden).
    X_shape = [batch_size, input_size]
    H_t_shape = [batch_size, hidden_size]
    W_shape = [hidden_size, input_size]
    R_shape = [hidden_size, hidden_size]
    B_shape = [hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32)
    parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32)
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    X_value = np.array(
        [0.3432185, 0.612268, 0.20272376, 0.9513413, 0.30585995, 0.7265472], dtype=np.float32
    ).reshape(X_shape)
    H_t_value = np.array(
        [0.12444675, 0.52055854, 0.46489045, 0.4983964, 0.7730452, 0.28439692], dtype=np.float32
    ).reshape(H_t_shape)
    W_value = np.array(
        [
            0.41930267,
            0.7872176,
            0.89940447,
            0.23659843,
            0.24676207,
            0.17101714,
            0.3147149,
            0.6555601,
            0.4559603,
        ],
        dtype=np.float32,
    ).reshape(W_shape)
    R_value = np.array(
        [
            0.8374871,
            0.86660194,
            0.82114047,
            0.71549815,
            0.18775631,
            0.3182116,
            0.25392973,
            0.38301638,
            0.85531586,
        ],
        dtype=np.float32,
    ).reshape(R_shape)
    B_value = np.array([1.0289404, 1.6362579, 0.4370661], dtype=np.float32).reshape(B_shape)
    activations = ["sigmoid"]
    activation_alpha = []
    activation_beta = []
    clip = 2.88  # pre-activation values are clipped to [-clip, clip]
    model = ng.rnn_cell(
        parameter_X,
        parameter_H_t,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    computation = runtime.computation(
        model, parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B
    )
    result = computation(X_value, H_t_value, W_value, R_value, B_value)
    expected = np.array(
        [0.94126844, 0.9036043, 0.841243, 0.9468489, 0.934215, 0.873708], dtype=np.float32
    ).reshape(batch_size, hidden_size)
    assert np.allclose(result, expected)
def test_group_convolution_operator():
    """Grouped convolution: 2 groups of 2 channels, 1x1 kernels, unit strides."""
    runtime = get_runtime()
    data_shape = [1, 4, 2, 2]
    filters_shape = [2, 1, 2, 1, 1]

    data_param = ng.parameter(data_shape, name="Data", dtype=np.float32)
    filters_param = ng.parameter(filters_shape, name="Filters", dtype=np.float32)
    input_values = np.arange(start=1.0, stop=17.0, dtype=np.float32).reshape(data_shape)
    filter_values = np.arange(start=1.0, stop=5.0, dtype=np.float32).reshape(filters_shape)

    node = ng.group_convolution(
        data_param, filters_param, [1, 1], [0, 0], [0, 0], [1, 1]
    )
    actual = runtime.computation(node, data_param, filters_param)(input_values, filter_values)

    reference = np.array([11, 14, 17, 20, 79, 86, 93, 100], dtype=np.float32).reshape(1, 2, 2, 2)
    assert np.allclose(actual, reference)
@pytest.mark.xfail(reason="Computation mismatch")
def test_group_convolution_backprop_data():
    """GroupConvolutionBackpropData (transposed conv) with stride 2 and output padding.

    Marked xfail: the backend result does not match the precomputed reference.
    """
    runtime = get_runtime()
    data_shape = [1, 1, 3, 3]
    filters_shape = [1, 1, 1, 3, 3]
    strides = [2, 2]
    output_padding = [1, 1]
    pads_begin = [1, 1]
    pads_end = [1, 1]
    data_node = ng.parameter(data_shape, name="Data", dtype=np.float32)
    filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32)
    # output_shape argument is None: the spatial output size is derived
    # from strides/pads/output_padding instead of being given explicitly.
    model = ng.group_convolution_backprop_data(
        data_node, filters_node, strides, None, pads_begin, pads_end, output_padding=output_padding
    )
    data_value = np.array(
        [
            0.16857791,
            -0.15161794,
            0.08540368,
            0.1820628,
            -0.21746576,
            0.08245695,
            0.1431433,
            -0.43156421,
            0.30591947,
        ],
        dtype=np.float32,
    ).reshape(data_shape)
    filters_value = np.array(
        [
            -0.06230065,
            0.37932432,
            -0.25388849,
            0.33878803,
            0.43709868,
            -0.22477469,
            0.04118127,
            -0.44696793,
            0.06373066,
        ],
        dtype=np.float32,
    ).reshape(filters_shape)
    computation = runtime.computation(model, data_node, filters_node)
    result = computation(data_value, filters_value)
    # Precomputed (1, 1, 6, 6) reference tensor.
    expected = np.array(
        [
            0.07368518,
            -0.08925839,
            -0.06627201,
            0.06301362,
            0.03732984,
            -0.01919658,
            -0.00628807,
            -0.02817563,
            -0.01472169,
            0.04392925,
            -0.00689478,
            -0.01549204,
            0.07957941,
            -0.11459791,
            -0.09505399,
            0.07681622,
            0.03604182,
            -0.01853423,
            -0.0270785,
            -0.00680824,
            -0.06650258,
            0.08004665,
            0.07918708,
            0.0724144,
            0.06256775,
            -0.17838378,
            -0.18863615,
            0.20064656,
            0.133717,
            -0.06876295,
            -0.06398046,
            -0.00864975,
            0.19289537,
            -0.01490572,
            -0.13673618,
            0.01949645,
        ],
        dtype=np.float32,
    ).reshape(1, 1, 6, 6)
    assert np.allclose(result, expected)
def test_group_convolution_backprop_data_output_shape():
    """GroupConvolutionBackpropData with an explicit output-shape constant."""
    runtime = get_runtime()
    data_shape = [1, 1, 1, 10]
    filters_shape = [1, 1, 1, 1, 5]

    data_node = ng.parameter(data_shape, name="Data", dtype=np.float32)
    filters_node = ng.parameter(filters_shape, name="Filters", dtype=np.float32)
    output_shape_node = ng.constant(np.array([1, 14], dtype=np.int64))
    node = ng.group_convolution_backprop_data(
        data_node, filters_node, [1, 1], output_shape_node, auto_pad="same_upper"
    )

    input_values = np.array(
        [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0], dtype=np.float32
    ).reshape(data_shape)
    filter_values = np.array([1.0, 2.0, 3.0, 2.0, 1.0], dtype=np.float32).reshape(filters_shape)

    actual = runtime.computation(node, data_node, filters_node)(input_values, filter_values)

    reference = np.array(
        [0.0, 1.0, 4.0, 10.0, 18.0, 27.0, 36.0, 45.0, 54.0, 63.0, 62.0, 50.0, 26.0, 9.0],
        dtype=np.float32,
    ).reshape(1, 1, 1, 14)
    assert np.allclose(actual, reference)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Nessi Network Simulator
#
# Authors: Juergen Ehrensberger; IICT HEIG-VD
# Creation: February 2005
#
# Copyright (c) 2003-2007 Juergen Ehrensberger
#
# This file is part of Nessi.
#
# Nessi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License.
#
# Nessi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nessi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
Implementations of different device types, like NICs, virtual devices, etc.
"""
__all__ = ["NIC", "AP", "QAP", "WNIC", "QWNIC"]
from netbase import Device, NIU
class NIC(NIU):
    """A simple wired Network Interface Card.

    Wiring (node, medium, protocols) is done after construction through
    setNode, attachToMedium and addProtocol.
    """

    def __init__(self):
        # No state yet; everything is installed later by the setup calls.
        pass

    def setNode(self, node, devicename):
        """Register this NIU on *node* under the name *devicename*.

        Arguments:
          node:Node -- Node to which the NIU is attached.
          devicename:String -- Name to access the NIU on the node.
        Return value: None.
        """
        self._node = node
        self.devicename = devicename
        self.fullName = "%s.%s" % (node.hostname, devicename)

    def attachToMedium(self, medium, position):
        """Connect this NIU to *medium* at *position*.

        Announces the NIU to the medium via medium.attachNIU.
        Return value: None.
        """
        self.medium = medium
        medium.attachNIU(self, position)

    def addProtocol(self, protocolEntity, protocolName):
        """Install *protocolEntity* on this NIU as attribute *protocolName*.

        Calls protocolEntity.install so the protocol can initialize itself.
        Return value: None.
        """
        setattr(self, protocolName, protocolEntity)
        protocolEntity.install(self, protocolName)
class AP(NIU):
    """An Access Point (AP) for a wireless network.

    Wiring (node, medium, protocols) is done after construction through
    setNode, attachToMedium and addProtocol.
    """

    def __init__(self):
        # No state yet; everything is installed later by the setup calls.
        pass

    def setNode(self, node, devicename):
        """Register this NIU on *node* under the name *devicename*.

        Arguments:
          node:Node -- Node to which the NIU is attached.
          devicename:String -- Name to access the NIU on the node.
        Return value: None.
        """
        self._node = node
        self.devicename = devicename
        self.fullName = "%s.%s" % (node.hostname, devicename)

    def attachToMedium(self, medium, position):
        """Connect this NIU to *medium* at *position*.

        Announces the NIU to the medium via medium.attachNIU.
        Return value: None.
        """
        self.medium = medium
        medium.attachNIU(self, position)

    def addProtocol(self, protocolEntity, protocolName):
        """Install *protocolEntity* on this NIU as attribute *protocolName*.

        Calls protocolEntity.install so the protocol can initialize itself.
        Return value: None.
        """
        setattr(self, protocolName, protocolEntity)
        protocolEntity.install(self, protocolName)
class QAP(NIU):
    """A QoS Access Point (QAP) for a QoS wireless network.

    Wiring (node, medium, protocols) is done after construction through
    setNode, attachToMedium and addProtocol.
    """

    def __init__(self):
        # No state yet; everything is installed later by the setup calls.
        pass

    def setNode(self, node, devicename):
        """Register this NIU on *node* under the name *devicename*.

        Arguments:
          node:Node -- Node to which the NIU is attached.
          devicename:String -- Name to access the NIU on the node.
        Return value: None.
        """
        self._node = node
        self.devicename = devicename
        self.fullName = "%s.%s" % (node.hostname, devicename)

    def attachToMedium(self, medium, position):
        """Connect this NIU to *medium* at *position*.

        Announces the NIU to the medium via medium.attachNIU.
        Return value: None.
        """
        self.medium = medium
        medium.attachNIU(self, position)

    def addProtocol(self, protocolEntity, protocolName):
        """Install *protocolEntity* on this NIU as attribute *protocolName*.

        Calls protocolEntity.install so the protocol can initialize itself.
        Return value: None.
        """
        setattr(self, protocolName, protocolEntity)
        protocolEntity.install(self, protocolName)
class WNIC(NIU):
    """A Wireless Network Interface Card (WNIC).

    Wiring (node, medium, protocols) is done after construction through
    setNode, attachToMedium and addProtocol.
    """

    def __init__(self):
        # No state yet; everything is installed later by the setup calls.
        pass

    def setNode(self, node, devicename):
        """Register this NIU on *node* under the name *devicename*.

        Arguments:
          node:Node -- Node to which the NIU is attached.
          devicename:String -- Name to access the NIU on the node.
        Return value: None.
        """
        self._node = node
        self.devicename = devicename
        self.fullName = "%s.%s" % (node.hostname, devicename)

    def attachToMedium(self, medium, position):
        """Connect this NIU to *medium* at *position*.

        Announces the NIU to the medium via medium.attachNIU.
        Return value: None.
        """
        self.medium = medium
        medium.attachNIU(self, position)

    def addProtocol(self, protocolEntity, protocolName):
        """Install *protocolEntity* on this NIU as attribute *protocolName*.

        Calls protocolEntity.install so the protocol can initialize itself.
        Return value: None.
        """
        setattr(self, protocolName, protocolEntity)
        protocolEntity.install(self, protocolName)
class QWNIC(NIU):
    """A QoS Wireless Network Interface Card for a QoS wireless network.

    Wiring (node, medium, protocols) is done after construction through
    setNode, attachToMedium and addProtocol.
    """

    def __init__(self):
        # No state yet; everything is installed later by the setup calls.
        pass

    def setNode(self, node, devicename):
        """Register this NIU on *node* under the name *devicename*.

        Arguments:
          node:Node -- Node to which the NIU is attached.
          devicename:String -- Name to access the NIU on the node.
        Return value: None.
        """
        self._node = node
        self.devicename = devicename
        self.fullName = "%s.%s" % (node.hostname, devicename)

    def attachToMedium(self, medium, position):
        """Connect this NIU to *medium* at *position*.

        Announces the NIU to the medium via medium.attachNIU.
        Return value: None.
        """
        self.medium = medium
        medium.attachNIU(self, position)

    def addProtocol(self, protocolEntity, protocolName):
        """Install *protocolEntity* on this NIU as attribute *protocolName*.

        Calls protocolEntity.install so the protocol can initialize itself.
        Return value: None.
        """
        setattr(self, protocolName, protocolEntity)
        protocolEntity.install(self, protocolName)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Register the 'rotten_gut_remmer_king' mobile template with the spawn service."""
    template = MobileTemplate()

    # Identity, level and spawn behaviour.
    template.setCreatureName('rotten_gut_remmer_king')
    template.setLevel(60)
    template.setDifficulty(Difficulty.NORMAL)
    template.setMinSpawnDistance(4)
    template.setMaxSpawnDistance(8)
    template.setDeathblow(True)
    template.setScale(1.2)
    template.setSocialGroup("remmer")
    template.setAssistRange(12)
    template.setStalker(False)
    template.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)

    # Visual appearance template(s).
    appearances = Vector()
    appearances.add('object/mobile/shared_rotten_gut_remmer_king.iff')
    template.setTemplates(appearances)

    # Default unarmed "weapon" used by the creature.
    weapons = Vector()
    weapons.add(WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic'))
    template.setWeaponTemplateVector(weapons)

    # Special attacks and the default melee attack.
    attacks = Vector()
    attacks.add('bm_bite_4')
    attacks.add('bm_bolster_armor_4')
    attacks.add('bm_enfeeble_4')
    template.setDefaultAttack('creatureMeleeAttack')
    template.setAttacks(attacks)

    core.spawnService.addMobileTemplate('rotten_gut_remmer_king', template)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh

test_description='Basic fetch/push functionality.
This test checks the following functionality:
* command-line syntax
* refspecs
* fast-forward detection, and overriding it
* configuration
* hooks
* --porcelain output format
* hiderefs
* reflogs
* URL validation
'

# Force a deterministic default branch name for the whole script.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME

# Create repositories without a template directory; hooks are added by hand.
TEST_CREATE_REPO_NO_TEMPLATE=1

. ./test-lib.sh

# Absolute path of the trash directory, used to address repos from subdirs.
D=$(pwd)
# mk_empty <repo> -- create an empty repository (cleaned up at the end of
# the current test) with a hooks directory and a lax receive.denyCurrentBranch
# so pushes into the checked-out branch only warn.
mk_empty () {
	repo_name="$1"
	test_when_finished "rm -rf \"$repo_name\"" &&
	test_path_is_missing "$repo_name" &&
	git init --template= "$repo_name" &&
	mkdir "$repo_name"/.git/hooks &&
	git -C "$repo_name" config receive.denyCurrentBranch warn
}
# mk_test <repo> <ref>... -- create an empty repository, push
# $the_first_commit to every listed ref, verify each ref, then fsck.
mk_test () {
	repo_name="$1"
	shift
	mk_empty "$repo_name" &&
	(
		for ref in "$@"
		do
			git push "$repo_name" $the_first_commit:refs/$ref ||
			exit
		done &&
		cd "$repo_name" &&
		for ref in "$@"
		do
			echo "$the_first_commit" >expect &&
			git show-ref -s --verify refs/$ref >actual &&
			test_cmp expect actual ||
			exit
		done &&
		git fsck --full
	)
}
# mk_test_with_hooks <repo> <ref>... -- like mk_test, but additionally
# install pre-receive/update/post-receive/post-update hooks that append
# their input to <hook>.actual files for later inspection by the tests.
mk_test_with_hooks() {
	repo_name=$1
	mk_test "$@" &&
	test_hook -C "$repo_name" pre-receive <<-'EOF' &&
	cat - >>pre-receive.actual
	EOF

	test_hook -C "$repo_name" update <<-'EOF' &&
	printf "%s %s %s\n" "$@" >>update.actual
	EOF

	test_hook -C "$repo_name" post-receive <<-'EOF' &&
	cat - >>post-receive.actual
	EOF

	test_hook -C "$repo_name" post-update <<-'EOF'
	for ref in "$@"
	do
		printf "%s\n" "$ref" >>post-update.actual
	done
	EOF
}
# mk_child <parent> <child> -- clone <parent> into <child>; the clone is
# removed again at the end of the current test.
mk_child() {
	test_when_finished "rm -rf \"$2\"" &&
	git clone --template= "$1" "$2"
}
# check_push_result <repo> <sha1> <ref>... -- verify that each <ref> in
# <repo> points at <sha1>, then fsck the repository.
check_push_result () {
	test $# -ge 3 ||
	BUG "check_push_result requires at least 3 parameters"
	repo_name="$1"
	shift
	(
		cd "$repo_name" &&
		echo "$1" >expect &&
		shift &&
		for ref in "$@"
		do
			git show-ref -s --verify refs/$ref >actual &&
			test_cmp expect actual ||
			exit
		done &&
		git fsck --full
	)
}
# Create two commits on main and remember both: $the_first_commit (path1)
# and $the_commit (path2, the tip).
test_expect_success setup '
	>path1 &&
	git add path1 &&
	test_tick &&
	git commit -a -m repo &&
	the_first_commit=$(git show-ref -s --verify refs/heads/main) &&
	>path2 &&
	git add path2 &&
	test_tick &&
	git commit -a -m second &&
	the_commit=$(git show-ref -s --verify refs/heads/main)
'

# --ipv4/--ipv6 exist only in their positive form; the negated variants
# must be rejected by both push and fetch.
for cmd in push fetch
do
	for opt in ipv4 ipv6
	do
		test_expect_success "reject 'git $cmd --no-$opt'" '
			test_must_fail git $cmd --no-$opt 2>err &&
			grep "unknown option .no-$opt" err
		'
	done
done
# Fetch into a freshly created repository using an explicit refspec.
test_expect_success 'fetch without wildcard' '
	mk_empty testrepo &&
	(
		cd testrepo &&
		git fetch .. refs/heads/main:refs/remotes/origin/main &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'

# Same, but through a configured remote with a wildcard fetch refspec.
test_expect_success 'fetch with wildcard' '
	mk_empty testrepo &&
	(
		cd testrepo &&
		git config remote.up.url .. &&
		git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
		git fetch up &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'

# url.<base>.insteadOf must rewrite the remote URL when fetching.
test_expect_success 'fetch with insteadOf' '
	mk_empty testrepo &&
	(
		TRASH=$(pwd)/ &&
		cd testrepo &&
		git config "url.$TRASH.insteadOf" trash/ &&
		git config remote.up.url trash/. &&
		git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
		git fetch up &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'

# url.<base>.pushInsteadOf applies to pushes only; fetch must not rewrite.
test_expect_success 'fetch with pushInsteadOf (should not rewrite)' '
	mk_empty testrepo &&
	(
		TRASH=$(pwd)/ &&
		cd testrepo &&
		git config "url.trash/.pushInsteadOf" "$TRASH" &&
		git config remote.up.url "$TRASH." &&
		git config remote.up.fetch "refs/heads/*:refs/remotes/origin/*" &&
		git fetch up &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
# grep_wrote <object-count> <trace-file> -- check that the trace2 event
# stream in <trace-file> reports exactly <object-count> written objects.
grep_wrote () {
	object_count=$1
	file_name=$2
	# Use the named parameters (previously assigned but then ignored in
	# favour of $1/$2) and quote the file name against odd paths.
	grep 'write_pack_file/wrote.*"value":"'$object_count'"' "$file_name"
}
# Without negotiation the client cannot learn about the hidden common ref,
# so it sends everything reachable from main.
test_expect_success 'push without negotiation' '
	mk_empty testrepo &&
	git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
	test_commit -C testrepo unrelated_commit &&
	git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
	test_when_finished "rm event" &&
	GIT_TRACE2_EVENT="$(pwd)/event" git -c protocol.version=2 push testrepo refs/heads/main:refs/remotes/origin/main &&
	grep_wrote 5 event # 2 commits, 2 trees, 1 blob
'

# With push.negotiate=1 the common commit is discovered despite the hidden
# ref, so only the new objects are sent.
test_expect_success 'push with negotiation' '
	mk_empty testrepo &&
	git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
	test_commit -C testrepo unrelated_commit &&
	git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
	test_when_finished "rm event" &&
	GIT_TRACE2_EVENT="$(pwd)/event" \
		git -c protocol.version=2 -c push.negotiate=1 \
		push testrepo refs/heads/main:refs/remotes/origin/main &&
	grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
	grep_wrote 2 event # 1 commit, 1 tree
'

# Protocol v0 cannot negotiate; the push must warn but still succeed,
# falling back to sending everything.
test_expect_success 'push with negotiation proceeds anyway even if negotiation fails' '
	mk_empty testrepo &&
	git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
	test_commit -C testrepo unrelated_commit &&
	git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
	test_when_finished "rm event" &&
	GIT_TEST_PROTOCOL_VERSION=0 GIT_TRACE2_EVENT="$(pwd)/event" \
		git -c push.negotiate=1 push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
	grep_wrote 5 event && # 2 commits, 2 trees, 1 blob
	test_grep "push negotiation failed" err
'

# Ref deletions must not trip the "negotiate-only" sanity check.
test_expect_success 'push deletion with negotiation' '
	mk_empty testrepo &&
	git push testrepo $the_first_commit:refs/heads/master &&
	git -c push.negotiate=1 push testrepo \
		:master $the_first_commit:refs/heads/next 2>errors-2 &&
	test_grep ! "negotiate-only needs one or " errors-2 &&
	git -c push.negotiate=1 push testrepo :next 2>errors-1 &&
	test_grep ! "negotiate-only needs one or " errors-1
'

# Negotiation must not trigger a submodule fetch even with submodule.recurse.
test_expect_success 'push with negotiation does not attempt to fetch submodules' '
	mk_empty submodule_upstream &&
	test_commit -C submodule_upstream submodule_commit &&
	test_config_global protocol.file.allow always &&
	git submodule add ./submodule_upstream submodule &&
	mk_empty testrepo &&
	git push testrepo $the_first_commit:refs/remotes/origin/first_commit &&
	test_commit -C testrepo unrelated_commit &&
	git -C testrepo config receive.hideRefs refs/remotes/origin/first_commit &&
	GIT_TRACE2_EVENT="$(pwd)/event" git -c submodule.recurse=true \
		-c protocol.version=2 -c push.negotiate=1 \
		push testrepo refs/heads/main:refs/remotes/origin/main 2>err &&
	grep \"key\":\"total_rounds\",\"value\":\"1\" event &&
	! grep "Fetching submodule" err
'
# Basic push refspec forms (explicit, wildcard) and URL rewriting via
# url.*.insteadOf / url.*.pushInsteadOf.
test_expect_success 'push without wildcard' '
	mk_empty testrepo &&
	git push testrepo refs/heads/main:refs/remotes/origin/main &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
test_expect_success 'push with wildcard' '
	mk_empty testrepo &&
	git push testrepo "refs/heads/*:refs/remotes/origin/*" &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
test_expect_success 'push with insteadOf' '
	mk_empty testrepo &&
	TRASH="$(pwd)/" &&
	test_config "url.$TRASH.insteadOf" trash/ &&
	git push trash/testrepo refs/heads/main:refs/remotes/origin/main &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
test_expect_success 'push with pushInsteadOf' '
	mk_empty testrepo &&
	TRASH="$(pwd)/" &&
	test_config "url.$TRASH.pushInsteadOf" trash/ &&
	git push trash/testrepo refs/heads/main:refs/remotes/origin/main &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
# An explicit remote.*.pushurl takes precedence: pushInsteadOf must not be
# applied on top of it (the trash2/trash3 rewrites would break the push).
test_expect_success 'push with pushInsteadOf and explicit pushurl (pushInsteadOf should not rewrite)' '
	mk_empty testrepo &&
	test_config "url.trash2/.pushInsteadOf" testrepo/ &&
	test_config "url.trash3/.pushInsteadOf" trash/wrong &&
	test_config remote.r.url trash/wrong &&
	test_config remote.r.pushurl "testrepo/" &&
	git push r refs/heads/main:refs/remotes/origin/main &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	)
'
# The ":" refspec pushes matching branches; non-fast-forward updates must be
# rejected unless forced (--force or a "+" refspec).  Later tests cover how
# an abbreviated destination is disambiguated against existing refs.
test_expect_success 'push with matching heads' '
	mk_test testrepo heads/main &&
	git push testrepo : &&
	check_push_result testrepo $the_commit heads/main
'
test_expect_success 'push with matching heads on the command line' '
	mk_test testrepo heads/main &&
	git push testrepo : &&
	check_push_result testrepo $the_commit heads/main
'
test_expect_success 'failed (non-fast-forward) push with matching heads' '
	mk_test testrepo heads/main &&
	git push testrepo : &&
	git commit --amend -massaged &&
	test_must_fail git push testrepo &&
	check_push_result testrepo $the_commit heads/main &&
	git reset --hard $the_commit
'
test_expect_success 'push --force with matching heads' '
	mk_test testrepo heads/main &&
	git push testrepo : &&
	git commit --amend -massaged &&
	git push --force testrepo : &&
	! check_push_result testrepo $the_commit heads/main &&
	git reset --hard $the_commit
'
test_expect_success 'push with matching heads and forced update' '
	mk_test testrepo heads/main &&
	git push testrepo : &&
	git commit --amend -massaged &&
	git push testrepo +: &&
	! check_push_result testrepo $the_commit heads/main &&
	git reset --hard $the_commit
'
test_expect_success 'push with no ambiguity (1)' '
	mk_test testrepo heads/main &&
	git push testrepo main:main &&
	check_push_result testrepo $the_commit heads/main
'
test_expect_success 'push with no ambiguity (2)' '
	mk_test testrepo remotes/origin/main &&
	git push testrepo main:origin/main &&
	check_push_result testrepo $the_commit remotes/origin/main
'
test_expect_success 'push with colon-less refspec, no ambiguity' '
	mk_test testrepo heads/main heads/t/main &&
	git branch -f t/main main &&
	git push testrepo main &&
	check_push_result testrepo $the_commit heads/main &&
	check_push_result testrepo $the_first_commit heads/t/main
'
# "weak" ambiguity: a head and remote-tracking ref(s) share the short name;
# the head wins and the others stay untouched.
test_expect_success 'push with weak ambiguity (1)' '
	mk_test testrepo heads/main remotes/origin/main &&
	git push testrepo main:main &&
	check_push_result testrepo $the_commit heads/main &&
	check_push_result testrepo $the_first_commit remotes/origin/main
'
test_expect_success 'push with weak ambiguity (2)' '
	mk_test testrepo heads/main remotes/origin/main remotes/another/main &&
	git push testrepo main:main &&
	check_push_result testrepo $the_commit heads/main &&
	check_push_result testrepo $the_first_commit remotes/origin/main remotes/another/main
'
# A head and a tag with the same short name is a hard ambiguity: refuse.
test_expect_success 'push with ambiguity' '
	mk_test testrepo heads/frotz tags/frotz &&
	test_must_fail git push testrepo main:frotz &&
	check_push_result testrepo $the_first_commit heads/frotz tags/frotz
'
# A colon-less refspec pushes the source to a same-named ref on the remote;
# whether it lands under heads/ or tags/ follows what the source is locally.
test_expect_success 'push with onelevel ref' '
	mk_test testrepo heads/main &&
	test_must_fail git push testrepo HEAD:refs/onelevel
'
test_expect_success 'push with colon-less refspec (1)' '
	mk_test testrepo heads/frotz tags/frotz &&
	git branch -f frotz main &&
	git push testrepo frotz &&
	check_push_result testrepo $the_commit heads/frotz &&
	check_push_result testrepo $the_first_commit tags/frotz
'
test_expect_success 'push with colon-less refspec (2)' '
	mk_test testrepo heads/frotz tags/frotz &&
	if git show-ref --verify -q refs/heads/frotz
	then
		git branch -D frotz
	fi &&
	git tag -f frotz &&
	git push -f testrepo frotz &&
	check_push_result testrepo $the_commit tags/frotz &&
	check_push_result testrepo $the_first_commit heads/frotz
'
test_expect_success 'push with colon-less refspec (3)' '
	mk_test testrepo &&
	if git show-ref --verify -q refs/tags/frotz
	then
		git tag -d frotz
	fi &&
	git branch -f frotz main &&
	git push testrepo frotz &&
	check_push_result testrepo $the_commit heads/frotz &&
	test 1 = $( cd testrepo && git show-ref | wc -l )
'
test_expect_success 'push with colon-less refspec (4)' '
	mk_test testrepo &&
	if git show-ref --verify -q refs/heads/frotz
	then
		git branch -D frotz
	fi &&
	git tag -f frotz &&
	git push testrepo frotz &&
	check_push_result testrepo $the_commit tags/frotz &&
	test 1 = $( cd testrepo && git show-ref | wc -l )
'
# With an unqualified, non-existent destination, git guesses heads/ for a
# branch source and tags/ for a tag source, but refuses raw oids/expressions.
test_expect_success 'push head with non-existent, incomplete dest' '
	mk_test testrepo &&
	git push testrepo main:branch &&
	check_push_result testrepo $the_commit heads/branch
'
test_expect_success 'push tag with non-existent, incomplete dest' '
	mk_test testrepo &&
	git tag -f v1.0 &&
	git push testrepo v1.0:tag &&
	check_push_result testrepo $the_commit tags/tag
'
test_expect_success 'push oid with non-existent, incomplete dest' '
	mk_test testrepo &&
	test_must_fail git push testrepo $(git rev-parse main):foo
'
test_expect_success 'push ref expression with non-existent, incomplete dest' '
	mk_test testrepo &&
	test_must_fail git push testrepo main^:branch
'
test_expect_success 'push ref expression with non-existent oid src' '
	mk_test testrepo &&
	test_must_fail git push testrepo $(test_oid 001):branch
'
# Both "HEAD" and its "@" shorthand must behave identically as a push
# source, so run the same battery of tests for each spelling.
for head in HEAD @
do
	test_expect_success "push with $head" '
		mk_test testrepo heads/main &&
		git checkout main &&
		git push testrepo $head &&
		check_push_result testrepo $the_commit heads/main
	'
	test_expect_success "push with $head nonexisting at remote" '
		mk_test testrepo heads/main &&
		git checkout -b local main &&
		test_when_finished "git checkout main; git branch -D local" &&
		git push testrepo $head &&
		check_push_result testrepo $the_commit heads/local
	'
	test_expect_success "push with +$head" '
		mk_test testrepo heads/main &&
		git checkout -b local main &&
		test_when_finished "git checkout main; git branch -D local" &&
		git push testrepo main local &&
		check_push_result testrepo $the_commit heads/main &&
		check_push_result testrepo $the_commit heads/local &&
		# Without force rewinding should fail
		git reset --hard $head^ &&
		test_must_fail git push testrepo $head &&
		check_push_result testrepo $the_commit heads/local &&
		# With force rewinding should succeed
		git push testrepo +$head &&
		check_push_result testrepo $the_first_commit heads/local
	'
	test_expect_success "push $head with non-existent, incomplete dest" '
		mk_test testrepo &&
		git checkout main &&
		git push testrepo $head:branch &&
		check_push_result testrepo $the_commit heads/branch
	'
	test_expect_success "push with config remote.*.push = $head" '
		mk_test testrepo heads/local &&
		git checkout main &&
		git branch -f local $the_commit &&
		test_when_finished "git branch -D local" &&
		(
			cd testrepo &&
			git checkout local &&
			git reset --hard $the_first_commit
		) &&
		test_config remote.there.url testrepo &&
		test_config remote.there.push $head &&
		test_config branch.main.remote there &&
		git push &&
		check_push_result testrepo $the_commit heads/main &&
		check_push_result testrepo $the_first_commit heads/local
	'
done
# Configuration-driven pushes: remote.*.push refmaps, remote.pushdefault,
# branch.*.pushremote precedence, remote.*.pushurl, and malformed
# branch-section names.
test_expect_success "push to remote with no explicit refspec and config remote.*.push = src:dest" '
	mk_test testrepo heads/main &&
	git checkout $the_first_commit &&
	test_config remote.there.url testrepo &&
	test_config remote.there.push refs/heads/main:refs/heads/main &&
	git push there &&
	check_push_result testrepo $the_commit heads/main
'
test_expect_success 'push with remote.pushdefault' '
	mk_test up_repo heads/main &&
	mk_test down_repo heads/main &&
	test_config remote.up.url up_repo &&
	test_config remote.down.url down_repo &&
	test_config branch.main.remote up &&
	test_config remote.pushdefault down &&
	test_config push.default matching &&
	git push &&
	check_push_result up_repo $the_first_commit heads/main &&
	check_push_result down_repo $the_commit heads/main
'
test_expect_success 'push with config remote.*.pushurl' '
	mk_test testrepo heads/main &&
	git checkout main &&
	test_config remote.there.url test2repo &&
	test_config remote.there.pushurl testrepo &&
	git push there : &&
	check_push_result testrepo $the_commit heads/main
'
# branch.*.pushremote beats remote.pushdefault, which beats branch.*.remote.
test_expect_success 'push with config branch.*.pushremote' '
	mk_test up_repo heads/main &&
	mk_test side_repo heads/main &&
	mk_test down_repo heads/main &&
	test_config remote.up.url up_repo &&
	test_config remote.pushdefault side_repo &&
	test_config remote.down.url down_repo &&
	test_config branch.main.remote up &&
	test_config branch.main.pushremote down &&
	test_config push.default matching &&
	git push &&
	check_push_result up_repo $the_first_commit heads/main &&
	check_push_result side_repo $the_first_commit heads/main &&
	check_push_result down_repo $the_commit heads/main
'
test_expect_success 'branch.*.pushremote config order is irrelevant' '
	mk_test one_repo heads/main &&
	mk_test two_repo heads/main &&
	test_config remote.one.url one_repo &&
	test_config remote.two.url two_repo &&
	test_config branch.main.pushremote two_repo &&
	test_config remote.pushdefault one_repo &&
	test_config push.default matching &&
	git push &&
	check_push_result one_repo $the_first_commit heads/main &&
	check_push_result two_repo $the_commit heads/main
'
# "branch.." (empty subsection) is invalid; "branch.autoSetupMerge" (no
# subsection at all) is a legitimate variable and must be ignored here.
test_expect_success 'push rejects empty branch name entries' '
	mk_test one_repo heads/main &&
	test_config remote.one.url one_repo &&
	test_config branch..remote one &&
	test_config branch..merge refs/heads/ &&
	test_config branch.main.remote one &&
	test_config branch.main.merge refs/heads/main &&
	test_must_fail git push 2>err &&
	grep "bad config variable .branch\.\." err
'
test_expect_success 'push ignores "branch." config without subsection' '
	mk_test one_repo heads/main &&
	test_config remote.one.url one_repo &&
	test_config branch.autoSetupMerge true &&
	test_config branch.main.remote one &&
	test_config branch.main.merge refs/heads/main &&
	git push
'
test_expect_success 'push updates local refs' '
mk_test testrepo heads/main &&
mk_child testrepo child &&
(
cd child &&
git pull .. main &&
git push &&
test $(git rev-parse main) = \
$(git rev-parse remotes/origin/main)
)
'
test_expect_success 'push updates up-to-date local refs' '
mk_test testrepo heads/main &&
mk_child testrepo child1 &&
mk_child testrepo child2 &&
(cd child1 && git pull .. main && git push) &&
(
cd child2 &&
git pull ../child1 main &&
git push &&
test $(git rev-parse main) = \
$(git rev-parse remotes/origin/main)
)
'
test_expect_success 'push preserves up-to-date packed refs' '
mk_test testrepo heads/main &&
mk_child testrepo child &&
(
cd child &&
git push &&
! test -f .git/refs/remotes/origin/main
)
'
test_expect_success 'push does not update local refs on failure' '
mk_test testrepo heads/main &&
mk_child testrepo child &&
echo "#!/no/frobnication/today" >testrepo/.git/hooks/pre-receive &&
chmod +x testrepo/.git/hooks/pre-receive &&
(
cd child &&
git pull .. main &&
test_must_fail git push &&
test $(git rev-parse main) != \
$(git rev-parse remotes/origin/main)
)
'
test_expect_success 'allow deleting an invalid remote ref' '
mk_test testrepo heads/branch &&
rm -f testrepo/.git/objects/??/* &&
git push testrepo :refs/heads/branch &&
(cd testrepo && test_must_fail git rev-parse --verify refs/heads/branch)
'
test_expect_success 'pushing valid refs triggers post-receive and post-update hooks' '
mk_test_with_hooks testrepo heads/main heads/next &&
orgmain=$(cd testrepo && git show-ref -s --verify refs/heads/main) &&
newmain=$(git show-ref -s --verify refs/heads/main) &&
orgnext=$(cd testrepo && git show-ref -s --verify refs/heads/next) &&
newnext=$ZERO_OID &&
git push testrepo refs/heads/main:refs/heads/main :refs/heads/next &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
$orgnext $newnext refs/heads/next
EOF
cat >update.expect <<-EOF &&
refs/heads/next $orgnext $newnext
refs/heads/main $orgmain $newmain
EOF
cat >post-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
$orgnext $newnext refs/heads/next
EOF
cat >post-update.expect <<-EOF &&
refs/heads/main
refs/heads/next
EOF
test_cmp pre-receive.expect pre-receive.actual &&
test_cmp update.expect update.actual &&
test_cmp post-receive.expect post-receive.actual &&
test_cmp post-update.expect post-update.actual
)
'
test_expect_success 'deleting dangling ref triggers hooks with correct args' '
mk_test_with_hooks testrepo heads/branch &&
orig=$(git -C testrepo rev-parse refs/heads/branch) &&
rm -f testrepo/.git/objects/??/* &&
git push testrepo :refs/heads/branch &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$orig $ZERO_OID refs/heads/branch
EOF
cat >update.expect <<-EOF &&
refs/heads/branch $orig $ZERO_OID
EOF
cat >post-receive.expect <<-EOF &&
$orig $ZERO_OID refs/heads/branch
EOF
cat >post-update.expect <<-EOF &&
refs/heads/branch
EOF
test_cmp pre-receive.expect pre-receive.actual &&
test_cmp update.expect update.actual &&
test_cmp post-receive.expect post-receive.actual &&
test_cmp post-update.expect post-update.actual
)
'
test_expect_success 'deletion of a non-existent ref is not fed to post-receive and post-update hooks' '
mk_test_with_hooks testrepo heads/main &&
orgmain=$(cd testrepo && git show-ref -s --verify refs/heads/main) &&
newmain=$(git show-ref -s --verify refs/heads/main) &&
git push testrepo main :refs/heads/nonexistent &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
$ZERO_OID $ZERO_OID refs/heads/nonexistent
EOF
cat >update.expect <<-EOF &&
refs/heads/nonexistent $ZERO_OID $ZERO_OID
refs/heads/main $orgmain $newmain
EOF
cat >post-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
EOF
cat >post-update.expect <<-EOF &&
refs/heads/main
EOF
test_cmp pre-receive.expect pre-receive.actual &&
test_cmp update.expect update.actual &&
test_cmp post-receive.expect post-receive.actual &&
test_cmp post-update.expect post-update.actual
)
'
test_expect_success 'deletion of a non-existent ref alone does trigger post-receive and post-update hooks' '
mk_test_with_hooks testrepo heads/main &&
git push testrepo :refs/heads/nonexistent &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$ZERO_OID $ZERO_OID refs/heads/nonexistent
EOF
cat >update.expect <<-EOF &&
refs/heads/nonexistent $ZERO_OID $ZERO_OID
EOF
test_cmp pre-receive.expect pre-receive.actual &&
test_cmp update.expect update.actual &&
test_path_is_missing post-receive.actual &&
test_path_is_missing post-update.actual
)
'
test_expect_success 'mixed ref updates, deletes, invalid deletes trigger hooks with correct input' '
mk_test_with_hooks testrepo heads/main heads/next heads/seen &&
orgmain=$(cd testrepo && git show-ref -s --verify refs/heads/main) &&
newmain=$(git show-ref -s --verify refs/heads/main) &&
orgnext=$(cd testrepo && git show-ref -s --verify refs/heads/next) &&
newnext=$ZERO_OID &&
orgseen=$(cd testrepo && git show-ref -s --verify refs/heads/seen) &&
newseen=$(git show-ref -s --verify refs/heads/main) &&
git push testrepo refs/heads/main:refs/heads/main \
refs/heads/main:refs/heads/seen :refs/heads/next \
:refs/heads/nonexistent &&
(
cd testrepo/.git &&
cat >pre-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
$orgnext $newnext refs/heads/next
$orgseen $newseen refs/heads/seen
$ZERO_OID $ZERO_OID refs/heads/nonexistent
EOF
cat >update.expect <<-EOF &&
refs/heads/next $orgnext $newnext
refs/heads/nonexistent $ZERO_OID $ZERO_OID
refs/heads/main $orgmain $newmain
refs/heads/seen $orgseen $newseen
EOF
cat >post-receive.expect <<-EOF &&
$orgmain $newmain refs/heads/main
$orgnext $newnext refs/heads/next
$orgseen $newseen refs/heads/seen
EOF
cat >post-update.expect <<-EOF &&
refs/heads/main
refs/heads/next
refs/heads/seen
EOF
test_cmp pre-receive.expect pre-receive.actual &&
test_cmp update.expect update.actual &&
test_cmp post-receive.expect post-receive.actual &&
test_cmp post-update.expect post-update.actual
)
'
# "git push --delete" forms: branches, tags, one-level refs, and the
# argument validation (no args, src:dest, empty string are all errors).
test_expect_success 'allow deleting a ref using --delete' '
	mk_test testrepo heads/main &&
	(cd testrepo && git config receive.denyDeleteCurrent warn) &&
	git push testrepo --delete main &&
	(cd testrepo && test_must_fail git rev-parse --verify refs/heads/main)
'
test_expect_success 'allow deleting a tag using --delete' '
	mk_test testrepo heads/main &&
	git tag -a -m dummy_message deltag heads/main &&
	git push testrepo --tags &&
	(cd testrepo && git rev-parse --verify -q refs/tags/deltag) &&
	git push testrepo --delete tag deltag &&
	(cd testrepo && test_must_fail git rev-parse --verify refs/tags/deltag)
'
test_expect_success 'push --delete without args aborts' '
	mk_test testrepo heads/main &&
	test_must_fail git push testrepo --delete
'
test_expect_success 'push --delete refuses src:dest refspecs' '
	mk_test testrepo heads/main &&
	test_must_fail git push testrepo --delete main:foo
'
test_expect_success 'push --delete refuses empty string' '
	mk_test testrepo heads/master &&
	test_must_fail git push testrepo --delete ""
'
test_expect_success 'push --delete onelevel refspecs' '
	mk_test testrepo heads/main &&
	git -C testrepo update-ref refs/onelevel refs/heads/main &&
	git push testrepo --delete refs/onelevel &&
	test_must_fail git -C testrepo rev-parse --verify refs/onelevel
'
# receive.denyCurrentBranch: pushing to the checked-out branch of a
# non-bare repository warns, is denied, or is allowed depending on the
# setting; bare repositories are always allowed.
test_expect_success 'warn on push to HEAD of non-bare repository' '
	mk_test testrepo heads/main &&
	(
		cd testrepo &&
		git checkout main &&
		git config receive.denyCurrentBranch warn
	) &&
	git push testrepo main 2>stderr &&
	grep "warning: updating the current branch" stderr
'
test_expect_success 'deny push to HEAD of non-bare repository' '
	mk_test testrepo heads/main &&
	(
		cd testrepo &&
		git checkout main &&
		git config receive.denyCurrentBranch true
	) &&
	test_must_fail git push testrepo main
'
test_expect_success 'allow push to HEAD of bare repository (bare)' '
	mk_test testrepo heads/main &&
	(
		cd testrepo &&
		git checkout main &&
		git config receive.denyCurrentBranch true &&
		git config core.bare true
	) &&
	git push testrepo main 2>stderr &&
	! grep "warning: updating the current branch" stderr
'
test_expect_success 'allow push to HEAD of non-bare repository (config)' '
	mk_test testrepo heads/main &&
	(
		cd testrepo &&
		git checkout main &&
		git config receive.denyCurrentBranch false
	) &&
	git push testrepo main 2>stderr &&
	! grep "warning: updating the current branch" stderr
'
# Legacy .git/branches/<name> remote shorthand ("URL" or "URL#branch").
# Guarded by !WITH_BREAKING_CHANGES since this mechanism is slated for
# removal in a future breaking release.
test_expect_success !WITH_BREAKING_CHANGES 'fetch with branches' '
	mk_empty testrepo &&
	git branch second $the_first_commit &&
	git checkout second &&
	mkdir testrepo/.git/branches &&
	echo ".." > testrepo/.git/branches/branch1 &&
	(
		cd testrepo &&
		git fetch branch1 &&
		echo "$the_commit commit refs/heads/branch1" >expect &&
		git for-each-ref refs/heads >actual &&
		test_cmp expect actual
	) &&
	git checkout main
'
test_expect_success !WITH_BREAKING_CHANGES 'fetch with branches containing #' '
	mk_empty testrepo &&
	mkdir testrepo/.git/branches &&
	echo "..#second" > testrepo/.git/branches/branch2 &&
	(
		cd testrepo &&
		git fetch branch2 &&
		echo "$the_first_commit commit refs/heads/branch2" >expect &&
		git for-each-ref refs/heads >actual &&
		test_cmp expect actual
	) &&
	git checkout main
'
test_expect_success !WITH_BREAKING_CHANGES 'push with branches' '
	mk_empty testrepo &&
	git checkout second &&
	test_when_finished "rm -rf .git/branches" &&
	mkdir .git/branches &&
	echo "testrepo" > .git/branches/branch1 &&
	git push branch1 &&
	(
		cd testrepo &&
		echo "$the_first_commit commit refs/heads/main" >expect &&
		git for-each-ref refs/heads >actual &&
		test_cmp expect actual
	)
'
test_expect_success !WITH_BREAKING_CHANGES 'push with branches containing #' '
	mk_empty testrepo &&
	test_when_finished "rm -rf .git/branches" &&
	mkdir .git/branches &&
	echo "testrepo#branch3" > .git/branches/branch2 &&
	git push branch2 &&
	(
		cd testrepo &&
		echo "$the_first_commit commit refs/heads/branch3" >expect &&
		git for-each-ref refs/heads >actual &&
		test_cmp expect actual
	) &&
	git checkout main
'
# refs/heads/bar is a symref to refs/heads/foo on the receiving side.
# Pushing the same value to both names is fine; pushing different values
# to the two aliases must be refused as an inconsistent update.
test_expect_success 'push into aliased refs (consistent)' '
	mk_test testrepo heads/main &&
	mk_child testrepo child1 &&
	mk_child testrepo child2 &&
	(
		cd child1 &&
		git branch foo &&
		git symbolic-ref refs/heads/bar refs/heads/foo &&
		git config receive.denyCurrentBranch false
	) &&
	(
		cd child2 &&
		>path2 &&
		git add path2 &&
		test_tick &&
		git commit -a -m child2 &&
		git branch foo &&
		git branch bar &&
		git push ../child1 foo bar
	)
'
test_expect_success 'push into aliased refs (inconsistent)' '
	mk_test testrepo heads/main &&
	mk_child testrepo child1 &&
	mk_child testrepo child2 &&
	(
		cd child1 &&
		git branch foo &&
		git symbolic-ref refs/heads/bar refs/heads/foo &&
		git config receive.denyCurrentBranch false
	) &&
	(
		cd child2 &&
		>path2 &&
		git add path2 &&
		test_tick &&
		git commit -a -m child2 &&
		git branch foo &&
		>path3 &&
		git add path3 &&
		test_tick &&
		git commit -a -m child2 &&
		git branch bar &&
		test_must_fail git push ../child1 foo bar 2>stderr &&
		grep "refusing inconsistent update" stderr
	)
'
# test_force_push_tag <description> <tag-args>
#
# Define a test verifying that replacing an existing tag on the remote
# always needs --force (or a "+" refspec), for both lightweight and
# annotated tags.  The test body is double-quoted so $tag_args is
# interpolated at definition time.
test_force_push_tag () {
	tag_type_description=$1
	tag_args=$2
	test_expect_success "force pushing required to update $tag_type_description" "
		mk_test testrepo heads/main &&
		mk_child testrepo child1 &&
		mk_child testrepo child2 &&
		(
			cd child1 &&
			git tag testTag &&
			git push ../child2 testTag &&
			>file1 &&
			git add file1 &&
			git commit -m 'file1' &&
			git tag $tag_args testTag &&
			test_must_fail git push ../child2 testTag &&
			git push --force ../child2 testTag &&
			git tag $tag_args testTag HEAD~ &&
			test_must_fail git push ../child2 testTag &&
			git push --force ../child2 testTag &&
			# Clobbering without + in refspec needs --force
			git tag -f testTag &&
			test_must_fail git push ../child2 'refs/tags/*:refs/tags/*' &&
			git push --force ../child2 'refs/tags/*:refs/tags/*' &&
			# Clobbering with + in refspec does not need --force
			git tag -f testTag HEAD~ &&
			git push ../child2 '+refs/tags/*:refs/tags/*' &&
			# Clobbering with --no-force still obeys + in refspec
			git tag -f testTag &&
			git push --no-force ../child2 '+refs/tags/*:refs/tags/*' &&
			# Clobbering with/without --force and 'tag <name>' format
			git tag -f testTag HEAD~ &&
			test_must_fail git push ../child2 tag testTag &&
			git push --force ../child2 tag testTag
		)
	"
}
test_force_push_tag "lightweight tag" "-f"
test_force_push_tag "annotated tag" "-f -a -m'tag message'"
# test_force_fetch_tag <description> <tag-args>
#
# Mirror of test_force_push_tag for the fetch direction: an existing tag
# must not be clobbered by fetch without --force or a "+" refspec.
test_force_fetch_tag () {
	tag_type_description=$1
	tag_args=$2
	test_expect_success "fetch will not clobber an existing $tag_type_description without --force" "
		mk_test testrepo heads/main &&
		mk_child testrepo child1 &&
		mk_child testrepo child2 &&
		(
			cd testrepo &&
			git tag testTag &&
			git -C ../child1 fetch origin tag testTag &&
			>file1 &&
			git add file1 &&
			git commit -m 'file1' &&
			git tag $tag_args testTag &&
			test_must_fail git -C ../child1 fetch origin tag testTag &&
			git -C ../child1 fetch origin '+refs/tags/*:refs/tags/*'
		)
	"
}
test_force_fetch_tag "lightweight tag" "-f"
test_force_fetch_tag "annotated tag" "-f -a -m'tag message'"
# --porcelain output format: the machine-readable report must match the
# expected byte-for-byte, and "Done" must only appear on success.
test_expect_success 'push --porcelain' '
	mk_empty testrepo &&
	echo >.git/foo "To testrepo" &&
	echo >>.git/foo "*	refs/heads/main:refs/remotes/origin/main	[new reference]" &&
	echo >>.git/foo "Done" &&
	git push >.git/bar --porcelain  testrepo refs/heads/main:refs/remotes/origin/main &&
	(
		cd testrepo &&
		echo "$the_commit commit refs/remotes/origin/main" >expect &&
		git for-each-ref refs/remotes/origin >actual &&
		test_cmp expect actual
	) &&
	test_cmp .git/foo .git/bar
'
test_expect_success 'push --porcelain bad url' '
	mk_empty testrepo &&
	test_must_fail git push >.git/bar --porcelain asdfasdfasd refs/heads/main:refs/remotes/origin/main &&
	! grep -q Done .git/bar
'
test_expect_success 'push --porcelain rejected' '
	mk_empty testrepo &&
	git push testrepo refs/heads/main:refs/remotes/origin/main &&
	(cd testrepo &&
		git reset --hard origin/main^ &&
		git config receive.denyCurrentBranch true) &&
	echo >.git/foo  "To testrepo"  &&
	echo >>.git/foo "!	refs/heads/main:refs/heads/main	[remote rejected] (branch is currently checked out)" &&
	echo >>.git/foo "Done" &&
	test_must_fail git push >.git/bar --porcelain  testrepo refs/heads/main:refs/heads/main &&
	test_cmp .git/foo .git/bar
'
test_expect_success 'push --porcelain --dry-run rejected' '
	mk_empty testrepo &&
	git push testrepo refs/heads/main:refs/remotes/origin/main &&
	(cd testrepo &&
		git reset --hard origin/main &&
		git config receive.denyCurrentBranch true) &&
	echo >.git/foo  "To testrepo"  &&
	echo >>.git/foo "!	refs/heads/main^:refs/heads/main	[rejected] (non-fast-forward)" &&
	echo >>.git/foo "Done" &&
	test_must_fail git push >.git/bar --porcelain  --dry-run testrepo refs/heads/main^:refs/heads/main &&
	test_cmp .git/foo .git/bar
'
# --prune removes remote refs with no counterpart under the pushed refspec;
# the hiderefs loop checks that refs hidden via transfer.hideRefs or
# receive.hideRefs cannot be updated, deleted, or even no-op pushed.
test_expect_success 'push --prune' '
	mk_test testrepo heads/main heads/foo heads/bar &&
	git push --prune testrepo : &&
	check_push_result testrepo $the_commit heads/main &&
	! check_push_result testrepo $the_first_commit heads/foo heads/bar
'
test_expect_success 'push --prune refspec' '
	mk_test testrepo tmp/main tmp/foo tmp/bar &&
	git push --prune testrepo "refs/heads/*:refs/tmp/*" &&
	check_push_result testrepo $the_commit tmp/main &&
	! check_push_result testrepo $the_first_commit tmp/foo tmp/bar
'
for configsection in transfer receive
do
	test_expect_success "push to update a ref hidden by $configsection.hiderefs" '
		mk_test testrepo heads/main hidden/one hidden/two hidden/three &&
		(
			cd testrepo &&
			git config $configsection.hiderefs refs/hidden
		) &&
		# push to unhidden ref succeeds normally
		git push testrepo main:refs/heads/main &&
		check_push_result testrepo $the_commit heads/main &&
		# push to update a hidden ref should fail
		test_must_fail git push testrepo main:refs/hidden/one &&
		check_push_result testrepo $the_first_commit hidden/one &&
		# push to delete a hidden ref should fail
		test_must_fail git push testrepo :refs/hidden/two &&
		check_push_result testrepo $the_first_commit hidden/two &&
		# idempotent push to update a hidden ref should fail
		test_must_fail git push testrepo $the_first_commit:refs/hidden/three &&
		check_push_result testrepo $the_first_commit hidden/three
	'
done
# Fetching by raw object id: under protocol v0 the server rejects requests
# for unadvertised objects unless uploadpack.allowtipsha1inwant is set;
# protocol v2 allows them by default.
test_expect_success 'fetch exact oid' '
	mk_test testrepo heads/main hidden/one &&
	git push testrepo main:refs/hidden/one &&
	(
		cd testrepo &&
		git config transfer.hiderefs refs/hidden
	) &&
	check_push_result testrepo $the_commit hidden/one &&
	mk_child testrepo child &&
	(
		cd child &&
		# make sure $the_commit does not exist here
		git repack -a -d &&
		git prune &&
		test_must_fail git cat-file -t $the_commit &&
		# Some protocol versions (e.g. 2) support fetching
		# unadvertised objects, so restrict this test to v0.
		# fetching the hidden object should fail by default
		test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
			git fetch -v ../testrepo $the_commit:refs/heads/copy 2>err &&
		test_grep "Server does not allow request for unadvertised object" err &&
		test_must_fail git rev-parse --verify refs/heads/copy &&
		# the server side can allow it to succeed
		(
			cd ../testrepo &&
			git config uploadpack.allowtipsha1inwant true
		) &&
		git fetch -v ../testrepo $the_commit:refs/heads/copy main:refs/heads/extra &&
		cat >expect <<-EOF &&
		$the_commit
		$the_first_commit
		EOF
		{
			git rev-parse --verify refs/heads/copy &&
			git rev-parse --verify refs/heads/extra
		} >actual &&
		test_cmp expect actual
	)
'
test_expect_success 'fetch exact oid in protocol v2' '
	mk_test testrepo heads/main hidden/one &&
	git push testrepo main:refs/hidden/one &&
	git -C testrepo config transfer.hiderefs refs/hidden &&
	check_push_result testrepo $the_commit hidden/one &&
	mk_child testrepo child &&
	git -C child config protocol.version 2 &&
	# make sure $the_commit does not exist here
	git -C child repack -a -d &&
	git -C child prune &&
	test_must_fail git -C child cat-file -t $the_commit &&
	# fetching the hidden object succeeds by default
	GIT_TRACE_PACKET=$PWD/trace.out \
		git -C child fetch -v ../testrepo $the_commit:refs/heads/copy &&
	test_grep ! "ref-prefix.*$the_commit" trace.out
'
# uploadpack.allowreachablesha1inwant permits fetching any reachable (but
# unadvertised) object regardless of allowtipsha1inwant; unreachable
# objects must stay unfetchable.  Run with both allowtipsha1inwant values.
for configallowtipsha1inwant in true false
do
	test_expect_success "shallow fetch reachable SHA1 (but not a ref), allowtipsha1inwant=$configallowtipsha1inwant" '
		mk_empty testrepo &&
		(
			cd testrepo &&
			git config uploadpack.allowtipsha1inwant $configallowtipsha1inwant &&
			git commit --allow-empty -m foo &&
			git commit --allow-empty -m bar
		) &&
		SHA1=$(git --git-dir=testrepo/.git rev-parse HEAD^) &&
		mk_empty shallow &&
		(
			cd shallow &&
			# Some protocol versions (e.g. 2) support fetching
			# unadvertised objects, so restrict this test to v0.
			test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
				git fetch --depth=1 ../testrepo/.git $SHA1 &&
			git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
			git fetch --depth=1 ../testrepo/.git $SHA1 &&
			git cat-file commit $SHA1
		)
	'
	test_expect_success "deny fetch unreachable SHA1, allowtipsha1inwant=$configallowtipsha1inwant" '
		mk_empty testrepo &&
		(
			cd testrepo &&
			git config uploadpack.allowtipsha1inwant $configallowtipsha1inwant &&
			git commit --allow-empty -m foo &&
			git commit --allow-empty -m bar &&
			git commit --allow-empty -m xyz
		) &&
		SHA1_1=$(git --git-dir=testrepo/.git rev-parse HEAD^^) &&
		SHA1_2=$(git --git-dir=testrepo/.git rev-parse HEAD^) &&
		SHA1_3=$(git --git-dir=testrepo/.git rev-parse HEAD) &&
		(
			cd testrepo &&
			git reset --hard $SHA1_2 &&
			git cat-file commit $SHA1_1 &&
			git cat-file commit $SHA1_3
		) &&
		mk_empty shallow &&
		(
			cd shallow &&
			# Some protocol versions (e.g. 2) support fetching
			# unadvertised objects, so restrict this test to v0.
			test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
				git fetch ../testrepo/.git $SHA1_3 &&
			test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
				git fetch ../testrepo/.git $SHA1_1 &&
			git --git-dir=../testrepo/.git config uploadpack.allowreachablesha1inwant true &&
			git fetch ../testrepo/.git $SHA1_1 &&
			git cat-file commit $SHA1_1 &&
			test_must_fail git cat-file commit $SHA1_2 &&
			git fetch ../testrepo/.git $SHA1_2 &&
			git cat-file commit $SHA1_2 &&
			test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
				git fetch ../testrepo/.git $SHA1_3 2>err &&
			# ideally we would insist this be on a "remote error:"
			# line, but it is racy; see the commit message
			test_grep "not our ref.*$SHA1_3\$" err
		)
	'
done
test_expect_success 'fetch follows tags by default' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
(
cd src &&
git pull ../testrepo main &&
git tag -m "annotated" tag &&
git for-each-ref >tmp1 &&
sed -n "p; s|refs/heads/main$|refs/remotes/origin/main|p" tmp1 |
sed -n "p; s|refs/heads/main$|refs/remotes/origin/HEAD|p" |
sort -k 4 >../expect
) &&
test_when_finished "rm -rf dst" &&
git init dst &&
(
cd dst &&
git remote add origin ../src &&
git config branch.main.remote origin &&
git config branch.main.merge refs/heads/main &&
git pull &&
git for-each-ref >../actual
) &&
test_cmp expect actual
'
test_expect_success 'peeled advertisements are not considered ref tips' '
mk_empty testrepo &&
git -C testrepo commit --allow-empty -m one &&
git -C testrepo commit --allow-empty -m two &&
git -C testrepo tag -m foo mytag HEAD^ &&
oid=$(git -C testrepo rev-parse mytag^{commit}) &&
test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
git fetch testrepo $oid 2>err &&
test_grep "Server does not allow request for unadvertised object" err
'
test_expect_success 'pushing a specific ref applies remote.$name.push as refmap' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
git pull ../testrepo main &&
git branch next &&
git config remote.dst.url ../dst &&
git config remote.dst.push "+refs/heads/*:refs/remotes/src/*" &&
git push dst main &&
git show-ref refs/heads/main |
sed -e "s|refs/heads/|refs/remotes/src/|" >../dst/expect
) &&
(
cd dst &&
test_must_fail git show-ref refs/heads/next &&
test_must_fail git show-ref refs/heads/main &&
git show-ref refs/remotes/src/main >actual
) &&
test_cmp dst/expect dst/actual
'
test_expect_success 'with no remote.$name.push, it is not used as refmap' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
git pull ../testrepo main &&
git branch next &&
git config remote.dst.url ../dst &&
git config push.default matching &&
git push dst main &&
git show-ref refs/heads/main >../dst/expect
) &&
(
cd dst &&
test_must_fail git show-ref refs/heads/next &&
git show-ref refs/heads/main >actual
) &&
test_cmp dst/expect dst/actual
'
test_expect_success 'with no remote.$name.push, upstream mapping is used' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
git pull ../testrepo main &&
git branch next &&
git config remote.dst.url ../dst &&
git config remote.dst.fetch "+refs/heads/*:refs/remotes/dst/*" &&
git config push.default upstream &&
git config branch.main.merge refs/heads/trunk &&
git config branch.main.remote dst &&
git push dst main &&
git show-ref refs/heads/main |
sed -e "s|refs/heads/main|refs/heads/trunk|" >../dst/expect
) &&
(
cd dst &&
test_must_fail git show-ref refs/heads/main &&
test_must_fail git show-ref refs/heads/next &&
git show-ref refs/heads/trunk >actual
) &&
test_cmp dst/expect dst/actual
'
test_expect_success 'push does not follow tags by default' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
git pull ../testrepo main &&
git tag -m "annotated" tag &&
git checkout -b another &&
git commit --allow-empty -m "future commit" &&
git tag -m "future" future &&
git checkout main &&
git for-each-ref refs/heads/main >../expect &&
git push ../dst main
) &&
(
cd dst &&
git for-each-ref >../actual
) &&
test_cmp expect actual
'
test_expect_success 'push --follow-tags only pushes relevant tags' '
mk_test testrepo heads/main &&
test_when_finished "rm -rf src" &&
git init src &&
test_when_finished "rm -rf dst" &&
git init --bare dst &&
(
cd src &&
git pull ../testrepo main &&
git tag -m "annotated" tag &&
git checkout -b another &&
git commit --allow-empty -m "future commit" &&
git tag -m "future" future &&
git checkout main &&
git for-each-ref refs/heads/main refs/tags/tag >../expect &&
git push --follow-tags ../dst main
) &&
(
cd dst &&
git for-each-ref >../actual
) &&
test_cmp expect actual
'
test_expect_success 'push --no-thin must produce non-thin pack' '
cat >>path1 <<\EOF &&
keep base version of path1 big enough, compared to the new changes
later, in order to pass size heuristics in
builtin/pack-objects.c:try_delta()
EOF
git commit -am initial &&
git init no-thin &&
git --git-dir=no-thin/.git config receive.unpacklimit 0 &&
git push no-thin/.git refs/heads/main:refs/heads/foo &&
echo modified >> path1 &&
git commit -am modified &&
git repack -adf &&
rcvpck="git receive-pack --reject-thin-pack-for-testing" &&
git push --no-thin --receive-pack="$rcvpck" no-thin/.git refs/heads/main:refs/heads/foo
'
test_expect_success 'pushing a tag pushes the tagged object' '
blob=$(echo unreferenced | git hash-object -w --stdin) &&
git tag -m foo tag-of-blob $blob &&
test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
git push dst.git tag-of-blob &&
# the receiving index-pack should have noticed
# any problems, but we double check
echo unreferenced >expect &&
git --git-dir=dst.git cat-file blob tag-of-blob >actual &&
test_cmp expect actual
'
test_expect_success 'push into bare respects core.logallrefupdates' '
test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
git -C dst.git config core.logallrefupdates true &&
# double push to test both with and without
# the actual pack transfer
git push dst.git main:one &&
echo "one@{0} push" >expect &&
git -C dst.git log -g --format="%gd %gs" one >actual &&
test_cmp expect actual &&
git push dst.git main:two &&
echo "two@{0} push" >expect &&
git -C dst.git log -g --format="%gd %gs" two >actual &&
test_cmp expect actual
'
test_expect_success 'fetch into bare respects core.logallrefupdates' '
test_when_finished "rm -rf dst.git" &&
git init --bare dst.git &&
(
cd dst.git &&
git config core.logallrefupdates true &&
# as above, we double-fetch to test both
# with and without pack transfer
git fetch .. main:one &&
echo "one@{0} fetch .. main:one: storing head" >expect &&
git log -g --format="%gd %gs" one >actual &&
test_cmp expect actual &&
git fetch .. main:two &&
echo "two@{0} fetch .. main:two: storing head" >expect &&
git log -g --format="%gd %gs" two >actual &&
test_cmp expect actual
)
'
test_expect_success 'receive.denyCurrentBranch = updateInstead' '
mk_empty testrepo &&
git push testrepo main &&
(
cd testrepo &&
git reset --hard &&
git config receive.denyCurrentBranch updateInstead
) &&
test_commit third path2 &&
# Try pushing into a repository with pristine working tree
git push testrepo main &&
(
cd testrepo &&
git update-index -q --refresh &&
git diff-files --quiet -- &&
git diff-index --quiet --cached HEAD -- &&
test third = "$(cat path2)" &&
test $(git -C .. rev-parse HEAD) = $(git rev-parse HEAD)
) &&
# Try pushing into a repository with working tree needing a refresh
(
cd testrepo &&
git reset --hard HEAD^ &&
test $(git -C .. rev-parse HEAD^) = $(git rev-parse HEAD) &&
test-tool chmtime +100 path1
) &&
git push testrepo main &&
(
cd testrepo &&
git update-index -q --refresh &&
git diff-files --quiet -- &&
git diff-index --quiet --cached HEAD -- &&
test_cmp ../path1 path1 &&
test third = "$(cat path2)" &&
test $(git -C .. rev-parse HEAD) = $(git rev-parse HEAD)
) &&
# Update what is to be pushed
test_commit fourth path2 &&
# Try pushing into a repository with a dirty working tree
# (1) the working tree updated
(
cd testrepo &&
echo changed >path1
) &&
test_must_fail git push testrepo main &&
(
cd testrepo &&
test $(git -C .. rev-parse HEAD^) = $(git rev-parse HEAD) &&
git diff --quiet --cached &&
test changed = "$(cat path1)"
) &&
# (2) the index updated
(
cd testrepo &&
echo changed >path1 &&
git add path1
) &&
test_must_fail git push testrepo main &&
(
cd testrepo &&
test $(git -C .. rev-parse HEAD^) = $(git rev-parse HEAD) &&
git diff --quiet &&
test changed = "$(cat path1)"
) &&
# Introduce a new file in the update
test_commit fifth path3 &&
# (3) the working tree has an untracked file that would interfere
(
cd testrepo &&
git reset --hard &&
echo changed >path3
) &&
test_must_fail git push testrepo main &&
(
cd testrepo &&
test $(git -C .. rev-parse HEAD^^) = $(git rev-parse HEAD) &&
git diff --quiet &&
git diff --quiet --cached &&
test changed = "$(cat path3)"
) &&
# (4) the target changes to what gets pushed but it still is a change
(
cd testrepo &&
git reset --hard &&
echo fifth >path3 &&
git add path3
) &&
test_must_fail git push testrepo main &&
(
cd testrepo &&
test $(git -C .. rev-parse HEAD^^) = $(git rev-parse HEAD) &&
git diff --quiet &&
test fifth = "$(cat path3)"
) &&
# (5) push into void
test_when_finished "rm -rf void" &&
git init void &&
(
cd void &&
git config receive.denyCurrentBranch updateInstead
) &&
git push void main &&
(
cd void &&
test $(git -C .. rev-parse main) = $(git rev-parse HEAD) &&
git diff --quiet &&
git diff --cached --quiet
) &&
# (6) updateInstead intervened by fast-forward check
test_must_fail git push void main^:main &&
test $(git -C void rev-parse HEAD) = $(git rev-parse main) &&
git -C void diff --quiet &&
git -C void diff --cached --quiet
'
test_expect_success 'updateInstead with push-to-checkout hook' '
test_when_finished "rm -rf testrepo" &&
git init testrepo &&
git -C testrepo pull .. main &&
git -C testrepo reset --hard HEAD^^ &&
git -C testrepo tag initial &&
git -C testrepo config receive.denyCurrentBranch updateInstead &&
test_hook -C testrepo push-to-checkout <<-\EOF &&
echo >&2 updating from $(git rev-parse HEAD)
echo >&2 updating to "$1"
git update-index -q --refresh &&
git read-tree -u -m HEAD "$1" || {
status=$?
echo >&2 read-tree failed
exit $status
}
EOF
# Try pushing into a pristine
git push testrepo main &&
(
cd testrepo &&
git diff --quiet &&
git diff HEAD --quiet &&
test $(git -C .. rev-parse HEAD) = $(git rev-parse HEAD)
) &&
# Try pushing into a repository with conflicting change
(
cd testrepo &&
git reset --hard initial &&
echo conflicting >path2
) &&
test_must_fail git push testrepo main &&
(
cd testrepo &&
test $(git rev-parse initial) = $(git rev-parse HEAD) &&
test conflicting = "$(cat path2)" &&
git diff-index --quiet --cached HEAD
) &&
# Try pushing into a repository with unrelated change
(
cd testrepo &&
git reset --hard initial &&
echo unrelated >path1 &&
echo irrelevant >path5 &&
git add path5
) &&
git push testrepo main &&
(
cd testrepo &&
test "$(cat path1)" = unrelated &&
test "$(cat path5)" = irrelevant &&
test "$(git diff --name-only --cached HEAD)" = path5 &&
test $(git -C .. rev-parse HEAD) = $(git rev-parse HEAD)
) &&
# push into void
test_when_finished "rm -rf void" &&
git init void &&
git -C void config receive.denyCurrentBranch updateInstead &&
test_hook -C void push-to-checkout <<-\EOF &&
if git rev-parse --quiet --verify HEAD
then
has_head=yes
echo >&2 updating from $(git rev-parse HEAD)
else
has_head=no
echo >&2 pushing into void
fi
echo >&2 updating to "$1"
git update-index -q --refresh &&
case "$has_head" in
yes)
git read-tree -u -m HEAD "$1" ;;
no)
git read-tree -u -m "$1" ;;
esac || {
status=$?
echo >&2 read-tree failed
exit $status
}
EOF
git push void main &&
(
cd void &&
git diff --quiet &&
git diff --cached --quiet &&
test $(git -C .. rev-parse HEAD) = $(git rev-parse HEAD)
)
'
test_expect_success 'denyCurrentBranch and worktrees' '
git worktree add new-wt &&
git clone . cloned &&
test_commit -C cloned first &&
test_config receive.denyCurrentBranch refuse &&
test_must_fail git -C cloned push origin HEAD:new-wt &&
test_config receive.denyCurrentBranch updateInstead &&
git -C cloned push origin HEAD:new-wt &&
test_path_exists new-wt/first.t &&
test_must_fail git -C cloned push --delete origin new-wt
'
test_expect_success 'denyCurrentBranch and bare repository worktrees' '
test_when_finished "rm -fr bare.git" &&
git clone --bare . bare.git &&
git -C bare.git worktree add wt &&
test_commit grape &&
git -C bare.git config receive.denyCurrentBranch refuse &&
test_must_fail git push bare.git HEAD:wt &&
git -C bare.git config receive.denyCurrentBranch updateInstead &&
git push bare.git HEAD:wt &&
test_path_exists bare.git/wt/grape.t &&
test_must_fail git push --delete bare.git wt
'
test_expect_success 'refuse fetch to current branch of worktree' '
test_when_finished "git worktree remove --force wt && git branch -D wt" &&
git worktree add wt &&
test_commit apple &&
test_must_fail git fetch . HEAD:wt &&
git fetch -u . HEAD:wt
'
test_expect_success 'refuse fetch to current branch of bare repository worktree' '
test_when_finished "rm -fr bare.git" &&
git clone --bare . bare.git &&
git -C bare.git worktree add wt &&
test_commit banana &&
test_must_fail git -C bare.git fetch .. HEAD:wt &&
git -C bare.git fetch -u .. HEAD:wt
'
test_expect_success 'refuse to push a hidden ref, and make sure do not pollute the repository' '
mk_empty testrepo &&
git -C testrepo config receive.hiderefs refs/hidden &&
git -C testrepo config receive.unpackLimit 1 &&
test_must_fail git push testrepo HEAD:refs/hidden/foo &&
test_dir_is_empty testrepo/.git/objects/pack
'
test_expect_success 'push with config push.useBitmaps' '
mk_test testrepo heads/main &&
git checkout main &&
test_unconfig push.useBitmaps &&
GIT_TRACE2_EVENT="$PWD/default" \
git push --quiet testrepo main:test &&
test_subcommand git pack-objects --all-progress-implied --revs --stdout \
--thin --delta-base-offset -q <default &&
test_config push.useBitmaps true &&
GIT_TRACE2_EVENT="$PWD/true" \
git push --quiet testrepo main:test2 &&
test_subcommand git pack-objects --all-progress-implied --revs --stdout \
--thin --delta-base-offset -q <true &&
test_config push.useBitmaps false &&
GIT_TRACE2_EVENT="$PWD/false" \
git push --quiet testrepo main:test3 &&
test_subcommand git pack-objects --all-progress-implied --revs --stdout \
--thin --delta-base-offset -q --no-use-bitmap-index <false
'
test_expect_success 'push with config pack.usePathWalk=true' '
mk_test testrepo heads/main &&
git checkout main &&
test_config pack.usePathWalk true &&
GIT_TRACE2_EVENT="$(pwd)/path-walk.txt" \
git push --quiet testrepo main:test &&
test_region pack-objects path-walk path-walk.txt
'
test_expect_success 'push with F/D conflict with deletion and creation' '
test_when_finished "git branch -D branch" &&
git branch branch/conflict &&
mk_test testrepo heads/branch/conflict &&
git branch -D branch/conflict &&
git branch branch &&
git push testrepo :refs/heads/branch/conflict refs/heads/branch
'
test_expect_success 'pushing non-commit objects should report error' '
test_when_finished "rm -rf dest repo" &&
git init dest &&
git init repo &&
(
cd repo &&
test_commit --annotate test &&
tagsha=$(git rev-parse test^{tag}) &&
test_must_fail git push ../dest "$tagsha:refs/heads/branch" 2>err &&
test_grep "! \[remote rejected\] $tagsha -> branch (invalid new value provided)" err &&
test_grep "trying to write non-commit object $tagsha to branch ${SQ}refs/heads/branch${SQ}" err
)
'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t5516-fetch-push.sh
|
import newrelic.agent
from treeherder.etl.common import make_request
from .artifactbuilders import (BuildbotJobArtifactBuilder,
BuildbotLogViewArtifactBuilder,
BuildbotPerformanceDataArtifactBuilder)
# Max log size in bytes we will download (prior to decompression).
MAX_DOWNLOAD_SIZE_IN_BYTES = 5 * 1024 * 1024
class ArtifactBuilderCollection:
"""
Run a log through a collection of Artifact Builders to generate artifacts.
Architecture
============
ArtifactBuilderCollection
------------------
* Holds one or more instances of ``ArtifactBuilderBase``
* If ``builders`` passed in, uses those as the artifact
builders, otherwise creates the default artifact builders.
* Reads the log from the log handle/url and walks each line
calling into each artifact builder with each line for handling
* Maintains no state
ArtifactBuilderBase
-------------
* Base class for all artifact builders`.
* Manages:
* artifact
* line number
* parser
* Passes lines the ``Parser``
BuildbotLogViewArtifactBuilder
-------------
* Parses out content for use in a visual Log Parser
* Parsers:
* StepParser, which has its own ErrorParser
BuildbotJobArtifactBuilder
-------------
* Builds an artifact for the Treeherder job details panel
* Parsers:
* TinderboxPrintParser
BuildbotPerformanceDataArtifactBuilder
-------------
* Builds an artifact from performance data
* Parsers:
* PerformanceParser
"""
def __init__(self, url, builders=None):
"""
``url`` - url of the log to be parsed
``builders`` - ArtifactBuilder instances to generate artifacts.
In omitted, use defaults.
"""
self.url = url
self.artifacts = {}
if builders:
# ensure that self.builders is a list, even if a single parser was
# passed in
if not isinstance(builders, list):
builders = [builders]
self.builders = builders
else:
# use the defaults
self.builders = [
BuildbotLogViewArtifactBuilder(url=self.url),
BuildbotJobArtifactBuilder(url=self.url),
BuildbotPerformanceDataArtifactBuilder(url=self.url)
]
def parse(self):
"""
Iterate over each line of the log, running each parser against it.
Stream lines from the gzip file and run each parser against it,
building the ``artifact`` as we go.
"""
with make_request(self.url, stream=True) as response:
download_size_in_bytes = int(response.headers.get('Content-Length', -1))
# Temporary annotation of log size to help set thresholds in bug 1295997.
newrelic.agent.add_custom_parameter(
'unstructured_log_size',
download_size_in_bytes
)
newrelic.agent.add_custom_parameter(
'unstructured_log_encoding',
response.headers.get('Content-Encoding', 'None')
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
raise LogSizeException('Download size of %i bytes exceeds limit' % download_size_in_bytes)
# Lines must be explicitly decoded since `iter_lines()`` returns bytes by default
# and we cannot use its `decode_unicode=True` mode, since otherwise Unicode newline
# characters such as `\u0085` (which can appear in test output) are treated the same
# as `\n` or `\r`, and so split into unwanted additional lines by `iter_lines()`.
for line in response.iter_lines():
for builder in self.builders:
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
builder.parse_line(line.decode('utf-8', 'replace'))
# gather the artifacts from all builders
for builder in self.builders:
# Run end-of-parsing actions for this parser,
# in case the artifact needs clean-up/summarising.
builder.finish_parse()
name = builder.name
artifact = builder.get_artifact()
if name == 'performance_data' and not artifact[name]:
continue
self.artifacts[name] = artifact
class LogSizeException(Exception):
pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
#include "BenchmarkHeader.h"
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/clangd/test/Inputs/BenchmarkSource.cpp
|
"""Custom Middleware."""
# Standard
import time
import logging
from threading import local
# Cust object
_appt_thread_local = local()
LOG = logging.getLogger(__name__)
class MiddlwareBase(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
class SimpleProfilingMiddleware(MiddlwareBase):
"""Middleware which logs the time taken to render a page."""
def process_request(self, request):
request.start_time = time.time()
def process_response(self, request, response):
if hasattr(request, 'start_time'):
LOG.info('{0} took {1:0.4f} seconds: '.format(
request.build_absolute_uri(),
time.time() - request.start_time,
))
return response
def set_request(request):
setattr(_appt_thread_local, 'request', request)
def get_request():
return getattr(_appt_thread_local, 'request', None)
class RequestMiddleware(MiddlwareBase):
"""Keep the request where it can be retrieved as needed"""
@staticmethod
def process_request(request):
set_request(request)
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"autoMigrateFrom": "graph",
"id": 4,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "gdev-prometheus"
},
"editorMode": "builder",
"expr": "{\"a.utf8.metric 🤘\", job=\"prometheus-utf8\"}",
"instant": false,
"legendFormat": "__auto",
"range": true,
"refId": "A"
}
],
"type": "timeseries"
}
],
"schemaVersion": 13,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "browser",
"title": "Dashboard with minimal graph panel settings",
"weekStart": ""
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/testdata/output/single_version/v13.minimal_graph_config.v13.json
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import StringIO
import json
import os
import urlparse
import zipfile
from imghdr import what
import requests
from lxml import etree
import gisdata
from django.conf import settings
from django.contrib.staticfiles.templatetags import staticfiles
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase, TestCase
from geonode import qgis_server
from geonode.decorators import on_ogc_backend
from geonode.layers.utils import file_upload
from geonode.maps.models import Map
from geonode.qgis_server.helpers import wms_get_capabilities_url, style_list
class DefaultViewsTest(TestCase):
def setUp(self):
call_command('loaddata', 'people_data', verbosity=0)
@on_ogc_backend(qgis_server.BACKEND_PACKAGE)
def test_default_context(self):
"""Test default context provided by qgis_server."""
response = self.client.get('/')
context = response.context
# Necessary context to ensure compatibility with views
# Some view needs these context to do some javascript logic.
self.assertIn('UPLOADER_URL', context)
self.assertIn('MAPFISH_PRINT_ENABLED', context)
self.assertIn('PRINT_NG_ENABLED', context)
self.assertIn('GEONODE_SECURITY_ENABLED', context)
self.assertIn('GEOGIG_ENABLED', context)
self.assertIn('TIME_ENABLED', context)
self.assertIn('MOSAIC_ENABLED', context)
class QGISServerViewsTest(LiveServerTestCase):
def setUp(self):
call_command('loaddata', 'people_data', verbosity=0)
@on_ogc_backend(qgis_server.BACKEND_PACKAGE)
def test_ogc_specific_layer(self):
"""Test we can use QGIS Server API for a layer.
For now, we are just checking we can call these views without any
exceptions. We should improve this test by checking the result.
"""
filename = os.path.join(gisdata.GOOD_DATA, 'raster/test_grid.tif')
uploaded = file_upload(filename)
filename = os.path.join(
gisdata.GOOD_DATA,
'vector/san_andres_y_providencia_administrative.shp')
vector_layer = file_upload(filename)
params = {'layername': uploaded.name}
# Zip
response = self.client.get(
reverse('qgis_server:download-zip', kwargs=params))
self.assertEqual(response.status_code, 200)
try:
f = StringIO.StringIO(response.content)
zipped_file = zipfile.ZipFile(f, 'r')
for one_file in zipped_file.namelist():
# We shoudn't get any QGIS project
self.assertFalse(one_file.endswith('.qgs'))
self.assertIsNone(zipped_file.testzip())
finally:
zipped_file.close()
f.close()
# Legend
response = self.client.get(
reverse('qgis_server:legend', kwargs=params))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/png')
self.assertEqual(what('', h=response.content), 'png')
# Tile
coordinates = {'z': '11', 'x': '1576', 'y': '1054'}
coordinates.update(params)
response = self.client.get(
reverse('qgis_server:tile', kwargs=coordinates))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/png')
self.assertEqual(what('', h=response.content), 'png')
# Tile 404
response = self.client.get(
reverse('qgis_server:tile', kwargs=params))
self.assertEqual(response.status_code, 404)
self.assertEqual(
response.get('Content-Type'), 'text/html; charset=utf-8')
# Geotiff
response = self.client.get(
reverse('qgis_server:geotiff', kwargs=params))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/tiff')
self.assertEqual(what('', h=response.content), 'tiff')
# Layer is already on the database
# checking the Link
links = uploaded.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_RASTER)
# checks signals.py for the hardcoded names in QLR and QGS
qlr_link = links.get(name='QGIS layer file (.qlr)')
self.assertIn("download-qlr", qlr_link.url)
qgs_link = links.get(name='QGIS project file (.qgs)')
self.assertIn("download-qgs", qgs_link.url)
# QLR
response = self.client.get(
reverse('qgis_server:download-qlr', kwargs=params))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.get('Content-Type'),
'application/x-qgis-layer-definition')
# check file name's extension
file_name = response.get('Content-Disposition').split('filename=')
file_ext = file_name[1].split('.')
self.assertEqual(file_ext[1], "qlr")
# QGS
response = self.client.get(
reverse('qgis_server:download-qgs', kwargs=params))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.get('Content-Type'),
'application/x-qgis-project')
# check file name's extension
file_name = response.get('Content-Disposition').split('filename=')
file_ext = file_name[1].split('.')
self.assertEqual(file_ext[1], "qgs")
response = self.client.get(
reverse('qgis_server:geotiff', kwargs={
'layername': vector_layer.name
}))
self.assertEqual(response.status_code, 404)
# QML Styles
# Request list of styles
response = self.client.get(
reverse('qgis_server:download-qml', kwargs=params))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'application/json')
# Should return a default style list
actual_result = json.loads(response.content)
actual_result = [s['name'] for s in actual_result]
expected_result = ['default']
self.assertEqual(set(expected_result), set(actual_result))
# Get single styles
response = self.client.get(
reverse('qgis_server:download-qml', kwargs={
'layername': params['layername'],
'style_name': 'default'
}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'text/xml')
# Set thumbnail from viewed bbox
response = self.client.get(
reverse('qgis_server:set-thumbnail', kwargs=params))
self.assertEqual(response.status_code, 400)
data = {
'bbox': '-5.54025,96.9406,-5.2820,97.1250'
}
response = self.client.post(
reverse('qgis_server:set-thumbnail', kwargs=params),
data=data)
# User dont have permission
self.assertEqual(response.status_code, 403)
# Should log in
self.client.login(username='admin', password='admin')
response = self.client.post(
reverse('qgis_server:set-thumbnail', kwargs=params),
data=data)
self.assertEqual(response.status_code, 200)
retval = json.loads(response.content)
expected_retval = {
'success': True
}
self.assertEqual(retval, expected_retval)
# OGC Server specific for THE layer
query_string = {
'SERVICE': 'WMS',
'VERSION': '1.3.0',
'REQUEST': 'GetLegendGraphics',
'FORMAT': 'image/png',
'LAYERS': uploaded.name,
}
response = self.client.get(
reverse('qgis_server:layer-request', kwargs=params), query_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/png')
self.assertEqual(what('', h=response.content), 'png')
# OGC Server for the Geonode instance
# GetLegendGraphics is a shortcut when using the main OGC server.
query_string = {
'SERVICE': 'WMS',
'VERSION': '1.3.0',
'REQUEST': 'GetLegendGraphics',
'FORMAT': 'image/png',
'LAYERS': uploaded.name,
}
response = self.client.get(
reverse('qgis_server:request'), query_string)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/png')
self.assertEqual(what('', h=response.content), 'png')
# WMS GetCapabilities
query_string = {
'SERVICE': 'WMS',
'VERSION': '1.3.0',
'REQUEST': 'GetCapabilities'
}
response = self.client.get(
reverse('qgis_server:request'), query_string)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
response.content, 'GetCapabilities is not supported yet.')
query_string['LAYERS'] = uploaded.name
response = self.client.get(
reverse('qgis_server:request'), query_string)
get_capabilities_content = response.content
# Check xml content
self.assertEqual(response.status_code, 200, response.content)
root = etree.fromstring(response.content)
layer_xml = root.xpath(
'wms:Capability/wms:Layer/wms:Layer/wms:Name',
namespaces={'wms': 'http://www.opengis.net/wms'})
self.assertEqual(len(layer_xml), 1)
self.assertEqual(layer_xml[0].text, uploaded.name)
# GetLegendGraphic request returned must be valid
layer_xml = root.xpath(
'wms:Capability/wms:Layer/'
'wms:Layer/wms:Style/wms:LegendURL/wms:OnlineResource',
namespaces={
'xlink': 'http://www.w3.org/1999/xlink',
'wms': 'http://www.opengis.net/wms'
})
legend_url = layer_xml[0].attrib[
'{http://www.w3.org/1999/xlink}href']
response = self.client.get(legend_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.get('Content-Type'), 'image/png')
self.assertEqual(what('', h=response.content), 'png')
# Check get capabilities using helper returns the same thing
response = requests.get(wms_get_capabilities_url(
uploaded, internal=False))
self.assertEqual(response.status_code, 200)
self.assertEqual(get_capabilities_content, response.content)
# WMS GetMap
query_string = {
'SERVICE': 'WMS',
'VERSION': '1.3.0',
'REQUEST': 'GetMap',
'FORMAT': 'image/png',
'LAYERS': uploaded.name,
'HEIGHT': 250,
'WIDTH': 250,
'SRS': 'EPSG:4326',
'BBOX': '-5.54025,96.9406,-5.2820,97.1250',
}
response = self.client.get(
reverse('qgis_server:request'), query_string)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
response.get('Content-Type'), 'image/png', response.content)
self.assertEqual(what('', h=response.content), 'png')
# End of the test, we should remove every files related to the test.
uploaded.delete()
vector_layer.delete()
@on_ogc_backend(qgis_server.BACKEND_PACKAGE)
def test_download_map_qlr(self):
"""Test download QLR file for a map"""
# 2 layers to be added to the map
filename = os.path.join(
gisdata.GOOD_DATA, 'raster/relief_san_andres.tif')
layer1 = file_upload(filename)
filename = os.path.join(
gisdata.GOOD_DATA,
'vector/san_andres_y_providencia_administrative.shp')
layer2 = file_upload(filename)
# construct json request for new map
json_payload = InitialSetup.generate_initial_map(layer1, layer2)
self.client.login(username='admin', password='admin')
response = self.client.post(
reverse('new_map_json'),
data=json.dumps(json_payload),
content_type='application/json')
# map is successfully saved
self.assertEqual(response.status_code, 200)
map_id = json.loads(response.content).get('id')
map = Map.objects.get(id=map_id)
# check that QLR is added to the links
links = map.link_set.download()
map_qlr_link = links.get(name='Download QLR Layer file')
self.assertIn('qlr', map_qlr_link.url)
# QLR
response = self.client.get(
reverse('map_download_qlr', kwargs={'mapid': map_id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.get('Content-Type'),
'application/x-qgis-layer-definition')
# cleanup
map.delete()
layer1.delete()
layer2.delete()
@on_ogc_backend(qgis_server.BACKEND_PACKAGE)
def test_map_json(self):
# 2 layers to be added to the map
filename = os.path.join(
gisdata.GOOD_DATA, 'raster/relief_san_andres.tif')
layer1 = file_upload(filename)
filename = os.path.join(
gisdata.GOOD_DATA,
'vector/san_andres_y_providencia_administrative.shp')
layer2 = file_upload(filename)
json_payload = InitialSetup.generate_initial_map(layer1, layer2)
# First, create a map with two layers
# Need to log in for saving a map
self.client.login(username='admin', password='admin')
result_new_map = self.client.post(
reverse('new_map_json'),
json.dumps(json_payload),
content_type='application/json')
# the new map is successfully saved
self.assertEqual(result_new_map.status_code, 200)
map_id = json.loads(result_new_map.content).get('id')
# try to remove one layer
layers = json_payload['map']['layers']
before_remove = len(layers)
after_remove = before_remove - 1
layer = layers[0]
layers.remove(layer)
# check if the layer is eliminated from the map
result_update_map = self.client.post(
reverse('map_json', kwargs={'mapid': map_id}),
data=json.dumps(json_payload),
content_type='application/json')
# successfully updated
self.assertEqual(result_update_map.status_code, 200)
# the number of layers on the map decrease by 1
self.assertEqual(
len(result_update_map.context_data['map'].layers),
after_remove)
# clean up
map = Map.objects.get(id=map_id)
map.delete()
layer1.delete()
layer2.delete()
class QGISServerStyleManagerTest(LiveServerTestCase):
    """Tests for QML style management through the qgis_server views."""

    def setUp(self):
        call_command('loaddata', 'people_data', verbosity=0)

    def data_path(self, path):
        """Return an absolute path to a test-data file under qgis_server/tests/data."""
        project_root = os.path.abspath(settings.PROJECT_ROOT)
        return os.path.join(
            project_root, 'qgis_server/tests/data', path)

    @on_ogc_backend(qgis_server.BACKEND_PACKAGE)
    def test_list_style(self):
        """Test querying list of styles from QGIS Server."""
        filename = os.path.join(gisdata.GOOD_DATA, 'raster/test_grid.tif')
        # layer is a geonode.layers.models.Layer
        layer = file_upload(filename)
        actual_list_style = style_list(layer, internal=False)
        expected_list_style = ['default']
        # There will be a default style
        if actual_list_style:
            self.assertEqual(
                set(expected_list_style),
                {style.name for style in actual_list_style})
        style_list_url = reverse(
            'qgis_server:download-qml',
            kwargs={
                'layername': layer.name
            })
        response = self.client.get(style_list_url)
        self.assertEqual(response.status_code, 200)
        actual_list_style = json.loads(response.content)
        # There will be a default style
        self.assertEqual(
            set(expected_list_style),
            {style['name'] for style in actual_list_style})
        layer.delete()

    @on_ogc_backend(qgis_server.BACKEND_PACKAGE)
    def test_add_delete_style(self):
        """Test add new style using qgis_server views."""
        filename = os.path.join(gisdata.GOOD_DATA, 'raster/test_grid.tif')
        # layer is a geonode.layers.models.Layer
        layer = file_upload(filename)
        self.client.login(username='admin', password='admin')
        qml_path = self.data_path('test_grid.qml')
        add_style_url = reverse(
            'qgis_server:upload-qml',
            kwargs={
                'layername': layer.name})
        # The file handle must stay open while the upload POST runs.
        with open(qml_path) as file_handle:
            form_data = {
                'name': 'new_style',
                'title': 'New Style',
                'qml': file_handle
            }
            response = self.client.post(
                add_style_url,
                data=form_data)
        self.assertEqual(response.status_code, 201)
        actual_list_style = style_list(layer, internal=False)
        if actual_list_style:
            expected_list_style = ['default', 'new_style']
            self.assertEqual(
                set(expected_list_style),
                {style.name for style in actual_list_style})
        # Test delete request
        delete_style_url = reverse(
            'qgis_server:remove-qml',
            kwargs={
                'layername': layer.name,
                'style_name': 'default'})
        response = self.client.delete(delete_style_url)
        self.assertEqual(response.status_code, 200)
        actual_list_style = style_list(layer, internal=False)
        if actual_list_style:
            expected_list_style = ['new_style']
            self.assertEqual(
                set(expected_list_style),
                {style.name for style in actual_list_style})
        # Check new default
        default_style_url = reverse(
            'qgis_server:default-qml',
            kwargs={
                'layername': layer.name})
        response = self.client.get(default_style_url)
        self.assertEqual(response.status_code, 200)
        expected_default_style_retval = {
            'name': 'new_style',
        }
        actual_default_style_retval = json.loads(response.content)
        # dict.items() instead of the Python-2-only iteritems() so this
        # test also runs under Python 3; iteration behaviour is identical.
        for key, value in expected_default_style_retval.items():
            self.assertEqual(actual_default_style_retval[key], value)
        layer.delete()
class ThumbnailGenerationTest(LiveServerTestCase):
    """Tests that uploading layers / saving maps produces thumbnails."""

    def setUp(self):
        call_command('loaddata', 'people_data', verbosity=0)

    def _verify_thumbnail(self, resource, thumb_filename):
        """Fetch a resource's remote thumbnail, save it, and check the links.

        Shared by the layer and map tests below, which previously
        duplicated this whole sequence.

        :param resource: Layer or Map instance carrying a ``link_set``
        :param thumb_filename: file name to save under MEDIA_ROOT/thumbs
        """
        # A 'remote thumbnail' link must exist after upload/creation.
        remote_thumbnail_link = resource.link_set.filter(
            name__icontains='remote thumbnail').first()
        self.assertIsNotNone(remote_thumbnail_link)
        self.assertTrue(remote_thumbnail_link.url)
        # The remote thumbnail uses a public address the test server cannot
        # reach, so rebuild it as a relative URL and fetch it with the
        # Django test client instead.
        parse_result = urlparse.urlsplit(remote_thumbnail_link.url)
        relative_url = urlparse.urlunsplit(
            ('', '', parse_result.path, parse_result.query, ''))
        response = self.client.get(relative_url)
        thumbnail_path = os.path.join(
            settings.MEDIA_ROOT, 'thumbs', thumb_filename)
        resource.save_thumbnail(thumbnail_path, response.content)
        # The saved file must exist and actually be a PNG.
        self.assertTrue(os.path.exists(thumbnail_path))
        self.assertEqual(what(thumbnail_path), 'png')
        # The resource now advertises a real (non-placeholder) thumbnail.
        self.assertTrue(resource.has_thumbnail())
        missing_thumbnail_url = staticfiles.static(settings.MISSING_THUMBNAIL)
        self.assertTrue(resource.get_thumbnail_url() != missing_thumbnail_url)
        thumbnail_links = resource.link_set.filter(name__icontains='thumbnail')
        self.assertTrue(len(thumbnail_links) > 0)
        link_names = ['remote thumbnail', 'thumbnail']
        for link in thumbnail_links:
            self.assertIn(link.name.lower(), link_names)

    @on_ogc_backend(qgis_server.BACKEND_PACKAGE)
    def test_thumbnail_links(self):
        """Test that thumbnail links were created after upload."""
        filename = os.path.join(gisdata.GOOD_DATA, 'raster/test_grid.tif')
        # layer is a geonode.layers.models.Layer
        layer = file_upload(filename)
        self._verify_thumbnail(layer, 'layer-thumb.png')
        # cleanup
        layer.delete()

    @on_ogc_backend(qgis_server.BACKEND_PACKAGE)
    def test_map_thumbnail(self):
        """Creating map will create thumbnail."""
        filename = os.path.join(
            gisdata.GOOD_DATA, 'raster/relief_san_andres.tif')
        layer1 = file_upload(filename)
        filename = os.path.join(
            gisdata.GOOD_DATA,
            'vector/san_andres_y_providencia_administrative.shp')
        layer2 = file_upload(filename)
        # construct json request for new map
        json_payload = InitialSetup.generate_initial_map(layer1, layer2)
        self.client.login(username='admin', password='admin')
        response = self.client.post(
            reverse('new_map_json'),
            json.dumps(json_payload),
            content_type='application/json')
        self.assertEqual(response.status_code, 200)
        map_id = json.loads(response.content).get('id')
        map = Map.objects.get(id=map_id)
        self._verify_thumbnail(map, 'map-thumb.png')
        # cleanup
        map.delete()
        layer1.delete()
        layer2.delete()
class InitialSetup():
    """Helpers building the JSON payloads used by the map tests."""

    @classmethod
    def generate_initial_map(cls, layer1, layer2):
        """Return a new-map JSON payload containing two background tile
        sources plus the two uploaded layers.

        :param layer1: relief_san_andres raster layer
        :param layer2: san_andres_y_providencia_administrative vector layer
        """
        # Tile-source URLs, shared between the "sources" and "layers" parts.
        roads_url = ('http://korona.geog.uni-heidelberg.de/tiles'
                     '/roads/x={x}&y={y}&z={z}')
        osm_url = 'http://{s}.tile.osm.org/{z}/{x}/{y}.png'
        admin_url = ('http://geonode.dev/qgis-server/tiles'
                     '/san_andres_y_providencia_administrative/'
                     '{z}/{x}/{y}.png')
        relief_url = ('http://geonode.dev/qgis-server/tiles'
                      '/relief_san_andres/{z}/{x}/{y}.png')
        sources = {
            "source_OpenMapSurfer Roads": {"url": roads_url},
            "source_OpenStreetMap": {"url": osm_url},
            "source_san_andres_y_providencia_administrative": {
                "url": admin_url},
            "source_relief_san_andres": {"url": relief_url},
        }
        layers = [
            {
                "name": "OpenMapSurfer_Roads",
                "title": "OpenMapSurfer Roads",
                "visibility": True,
                "url": roads_url,
                "group": "background",
                "source": "source_OpenMapSurfer Roads",
            },
            {
                "name": "osm",
                "title": "OpenStreetMap",
                "visibility": False,
                "url": osm_url,
                "group": "background",
                "source": "source_OpenStreetMap",
            },
            {
                "name": layer2.alternate,
                "title": layer2.name,
                "visibility": True,
                "url": admin_url,
                "source": "source_"
                          "san_andres_y_providencia_administrative",
            },
            {
                "name": layer1.alternate,
                "title": layer1.name,
                "visibility": True,
                "url": relief_url,
                "source": "source_relief_san_andres",
            },
        ]
        return {
            "sources": sources,
            "about": {
                "title": "San Andreas",
                "abstract": "San Andreas sample map",
            },
            "map": {
                "center": [12.91890657418042, -81.298828125],
                "zoom": 6,
                "projection": "",
                "layers": layers,
            },
        }
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.oncrpc;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.util.Preconditions;
/**
* Utility class for building XDR messages based on RFC 4506.
*
* Key points of the format:
*
* <ul>
* <li>Primitives are stored in big-endian order (i.e., the default byte order
* of ByteBuffer).</li>
* <li>Booleans are stored as an integer.</li>
* <li>Each field in the message is always aligned by 4.</li>
* </ul>
*
*/
public final class XDR {
  private static final int DEFAULT_INITIAL_CAPACITY = 256;
  private static final int SIZEOF_INT = 4;
  private static final int SIZEOF_LONG = 8;
  // Scratch source for emitting up to three alignment padding bytes.
  private static final byte[] PADDING_BYTES = new byte[] { 0, 0, 0, 0 };

  private ByteBuffer buf;

  public enum State {
    READING, WRITING,
  }

  // Direction of this XDR: decoding (READING) or encoding (WRITING).
  // Fixed at construction time.
  private final State state;

  /**
   * Construct a new XDR message buffer.
   *
   * @param initialCapacity
   *          the initial capacity of the buffer.
   */
  public XDR(int initialCapacity) {
    this(ByteBuffer.allocate(initialCapacity), State.WRITING);
  }

  public XDR() {
    this(DEFAULT_INITIAL_CAPACITY);
  }

  public XDR(ByteBuffer buf, State state) {
    this.buf = buf;
    this.state = state;
  }

  /**
   * Wraps a byte array as a read-only XDR message. There's no copy involved,
   * thus it is the client's responsibility to ensure that the byte array
   * remains unmodified when using the XDR object.
   *
   * @param src
   *          the byte array to be wrapped.
   */
  public XDR(byte[] src) {
    this(ByteBuffer.wrap(src).asReadOnlyBuffer(), State.READING);
  }

  // Return a read-only READING view over this message's bytes; for a
  // WRITING message the view is flipped so only the bytes written so far
  // are visible.
  public XDR asReadOnlyWrap() {
    ByteBuffer b = buf.asReadOnlyBuffer();
    if (state == State.WRITING) {
      b.flip();
    }
    XDR n = new XDR(b, State.READING);
    return n;
  }

  public ByteBuffer buffer() {
    return buf.duplicate();
  }

  public int size() {
    // TODO: This overloading intends to be compatible with the semantics of
    // the previous version of the class. This function should be separated into
    // two with clear semantics.
    return state == State.READING ? buf.limit() : buf.position();
  }

  public int readInt() {
    Preconditions.checkState(state == State.READING);
    return buf.getInt();
  }

  public void writeInt(int v) {
    ensureFreeSpace(SIZEOF_INT);
    buf.putInt(v);
  }

  // XDR booleans occupy a full 4-byte int; any non-zero value is true.
  public boolean readBoolean() {
    Preconditions.checkState(state == State.READING);
    return buf.getInt() != 0;
  }

  public void writeBoolean(boolean v) {
    ensureFreeSpace(SIZEOF_INT);
    buf.putInt(v ? 1 : 0);
  }

  // XDR "hyper" is a 64-bit integer.
  public long readHyper() {
    Preconditions.checkState(state == State.READING);
    return buf.getLong();
  }

  public void writeLongAsHyper(long v) {
    ensureFreeSpace(SIZEOF_LONG);
    buf.putLong(v);
  }

  // Read exactly `size` bytes, then skip past the alignment padding so the
  // position stays on a 4-byte boundary.
  public byte[] readFixedOpaque(int size) {
    Preconditions.checkState(state == State.READING);
    byte[] r = new byte[size];
    buf.get(r);
    alignPosition();
    return r;
  }

  // Write `length` bytes of `src` followed by zero padding up to the next
  // 4-byte boundary.
  public void writeFixedOpaque(byte[] src, int length) {
    ensureFreeSpace(alignUp(length));
    buf.put(src, 0, length);
    writePadding();
  }

  public void writeFixedOpaque(byte[] src) {
    writeFixedOpaque(src, src.length);
  }

  // Variable-length opaque data is a 4-byte length prefix followed by the
  // padded payload.
  public byte[] readVariableOpaque() {
    Preconditions.checkState(state == State.READING);
    int size = readInt();
    return readFixedOpaque(size);
  }

  public void writeVariableOpaque(byte[] src) {
    ensureFreeSpace(SIZEOF_INT + alignUp(src.length));
    buf.putInt(src.length);
    writeFixedOpaque(src);
  }

  // Strings are encoded as variable-length opaque UTF-8 bytes.
  public String readString() {
    return new String(readVariableOpaque(), StandardCharsets.UTF_8);
  }

  public void writeString(String s) {
    writeVariableOpaque(s.getBytes(StandardCharsets.UTF_8));
  }

  // Emit 0-3 zero bytes so the write position lands on a 4-byte boundary.
  private void writePadding() {
    Preconditions.checkState(state == State.WRITING);
    int p = pad(buf.position());
    ensureFreeSpace(p);
    buf.put(PADDING_BYTES, 0, p);
  }

  // Round `length` up to the next multiple of 4.
  private int alignUp(int length) {
    return length + pad(length);
  }

  // Number of padding bytes (0-3) needed to make `length` a multiple of 4.
  private int pad(int length) {
    switch (length % 4) {
    case 1:
      return 3;
    case 2:
      return 2;
    case 3:
      return 1;
    default:
      return 0;
    }
  }

  // Advance the read position past any alignment padding.
  private void alignPosition() {
    buf.position(alignUp(buf.position()));
  }

  // Grow the backing buffer (doubling the capacity each round) until at
  // least `size` bytes are free, copying over the bytes written so far.
  private void ensureFreeSpace(int size) {
    Preconditions.checkState(state == State.WRITING);
    if (buf.remaining() < size) {
      int newCapacity = buf.capacity() * 2;
      // Free space after one doubling: the old free space plus one extra
      // old-capacity's worth of bytes.
      int newRemaining = buf.capacity() + buf.remaining();
      while (newRemaining < size) {
        newRemaining += newCapacity;
        newCapacity *= 2;
      }
      ByteBuffer newbuf = ByteBuffer.allocate(newCapacity);
      buf.flip();
      newbuf.put(buf);
      buf = newbuf;
    }
  }

  /**
   * check if the rest of data has more than len bytes.
   * @param xdr XDR message
   * @param len minimum remaining length
   * @return specify remaining length is enough or not
   */
  public static boolean verifyLength(XDR xdr, int len) {
    return xdr.buf.remaining() >= len;
  }

  // Build a 4-byte RPC record mark: fragment size with the high bit set
  // when this is the last fragment.
  static byte[] recordMark(int size, boolean last) {
    byte[] b = new byte[SIZEOF_INT];
    ByteBuffer buf = ByteBuffer.wrap(b);
    buf.putInt(!last ? size : size | 0x80000000);
    return b;
  }

  /**
   * Write an XDR message to a TCP ChannelBuffer.
   * @param request XDR request
   * @param last specifies last request or not
   * @return TCP buffer
   */
  public static ByteBuf writeMessageTcp(XDR request, boolean last) {
    Preconditions.checkState(request.state == XDR.State.WRITING);
    ByteBuffer b = request.buf.duplicate();
    b.flip();
    // TCP framing prepends a record mark carrying the fragment length.
    byte[] fragmentHeader = XDR.recordMark(b.limit(), last);
    ByteBuffer headerBuf = ByteBuffer.wrap(fragmentHeader);

    // TODO: Investigate whether making a copy of the buffer is necessary.
    return Unpooled.wrappedBuffer(headerBuf, b);
  }

  /**
   * Write an XDR message to a UDP ChannelBuffer.
   * @param response XDR response
   * @return UDP buffer
   */
  public static ByteBuf writeMessageUdp(XDR response) {
    // Expects an already-flipped READING message; UDP has no record mark.
    Preconditions.checkState(response.state == XDR.State.READING);
    // TODO: Investigate whether making a copy of the buffer is necessary.
    return Unpooled.copiedBuffer(response.buf);
  }

  // Fragment length is the record mark with the last-fragment bit masked off.
  public static int fragmentSize(byte[] mark) {
    ByteBuffer b = ByteBuffer.wrap(mark);
    int n = b.getInt();
    return n & 0x7fffffff;
  }

  // The high bit of the record mark flags the last fragment.
  public static boolean isLastFragment(byte[] mark) {
    ByteBuffer b = ByteBuffer.wrap(mark);
    int n = b.getInt();
    return (n & 0x80000000) != 0;
  }

  @VisibleForTesting
  public byte[] getBytes() {
    ByteBuffer d = asReadOnlyWrap().buffer();
    byte[] b = new byte[d.remaining()];
    d.get(b);
    return b;
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/oncrpc/XDR.java
|
//go:build windows
package libnetwork
import "context"
// setupNAT is a no-op on Windows: no NAT redirection rules are installed
// for the resolver on this platform.
func (r *Resolver) setupNAT(context.Context) error {
	return nil
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/libnetwork/resolver_windows.go
|
# Copyright (c) 2025 Baidu, Inc. and HuggingFace Inc. team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Ernie 4.5 model"""
import torch
from torch import nn
from ...modeling_rope_utils import dynamic_rope_update
from ...utils import auto_docstring, can_return_tuple
from ...utils.generic import maybe_autocast
from ..glm.modeling_glm import rotate_half
from ..llama.modeling_llama import (
LlamaAttention,
LlamaForCausalLM,
LlamaMLP,
)
from ..olmo.modeling_olmo import OlmoRotaryEmbedding
from .configuration_ernie4_5 import Ernie4_5Config
class Ernie4_5RotaryEmbedding(OlmoRotaryEmbedding):
    @torch.no_grad()
    @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
    def forward(self, x, position_ids):
        """Return the (cos, sin) rotary tables for the given positions.

        `x` is used only for its device; `position_ids` is expected to be
        (batch, seq_len) — its first dim sizes the expansion below.
        The result is returned in float32 (no downcast to x.dtype here).
        """
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()
        # Fall back to "cpu" for non-string device types or "mps" when
        # selecting the autocast guard below.
        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with maybe_autocast(device_type=device_type, enabled=False): # Force float32
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            # Duplicate the frequency table across the second half of the dim.
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling
        # keeping it in full precision
        return cos, sin
def apply_rotary_pos_emb(q, k, cos, sin, unsqueeze_dim=1):
    """Apply rotary position embeddings to the query and key tensors.

    Uses the GLM-style interleaved rotation over the full head dimension,
    computed in float32 and cast back to the inputs' dtype.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            Dimension along which `cos` and `sin` are unsqueezed so they
            broadcast over the heads dimension of q and k: use 1 for
            [batch, heads, seq, dim] layouts and 2 for
            [batch, seq, heads, dim] layouts.

    Returns:
        `tuple(torch.Tensor)` of the rotated (q, k) tensors.
    """
    input_dtype = q.dtype
    cos_b = cos.unsqueeze(unsqueeze_dim)
    sin_b = sin.unsqueeze(unsqueeze_dim)
    # Take the first half of each table and interleave every entry twice,
    # instead of the usual [cos, cos] concatenated layout.
    cos_b = cos_b[..., : cos_b.shape[-1] // 2].repeat_interleave(2, dim=-1)
    sin_b = sin_b[..., : sin_b.shape[-1] // 2].repeat_interleave(2, dim=-1)
    # The rotation itself runs in full precision.
    rotated_q = q.float() * cos_b + rotate_half(q).float() * sin_b
    rotated_k = k.float() * cos_b + rotate_half(k).float() * sin_b
    return rotated_q.to(input_dtype), rotated_k.to(input_dtype)
class Ernie4_5MLP(LlamaMLP):
    """Llama-style gated MLP whose projections honour ``config.use_bias``."""

    def __init__(self, config: Ernie4_5Config):
        super().__init__(config)
        # Re-create the three projections set up by the parent so that the
        # bias flag comes from the Ernie config.
        use_bias = config.use_bias
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=use_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=use_bias)
class Ernie4_5Attention(LlamaAttention):
    """Llama-style attention with config-controlled projection biases and
    no attention dropout."""

    def __init__(self, config: Ernie4_5Config, layer_idx: int):
        super().__init__(config, layer_idx)
        self.attention_dropout = 0.0
        # Re-create the q/k/v/o projections so the bias flag follows
        # config.use_bias.
        hidden = config.hidden_size
        q_out = config.num_attention_heads * self.head_dim
        kv_out = config.num_key_value_heads * self.head_dim
        use_bias = config.use_bias
        self.q_proj = nn.Linear(hidden, q_out, bias=use_bias)
        self.k_proj = nn.Linear(hidden, kv_out, bias=use_bias)
        self.v_proj = nn.Linear(hidden, kv_out, bias=use_bias)
        self.o_proj = nn.Linear(q_out, hidden, bias=use_bias)
class Ernie4_5ForCausalLM(LlamaForCausalLM):
    @can_return_tuple
    @auto_docstring
    def forward(self, **super_kwargs):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        # NOTE(review): the result of the super() call is not returned. In
        # HF "modular" files such stubs are typically expanded by the
        # modular converter, so the missing `return` may be intentional —
        # confirm against the generated modeling_ernie4_5.py.
        super().forward(**super_kwargs)
# Public API of this module; the `noqa: F822` markers acknowledge names
# that are not defined in this file.
__all__ = [
    "Ernie4_5ForCausalLM",
    "Ernie4_5Model", # noqa: F822
    "Ernie4_5PreTrainedModel", # noqa: F822
]
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/models/ernie4_5/modular_ernie4_5.py
|
"""Support for the Twitch stream status."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed while a channel is streaming.
ATTR_GAME = 'game'
ATTR_TITLE = 'title'

# Platform configuration keys.
CONF_CHANNELS = 'channels'
CONF_CLIENT_ID = 'client_id'

ICON = 'mdi:twitch'

# The two states a channel sensor can report.
STATE_OFFLINE = 'offline'
STATE_STREAMING = 'streaming'

# A Twitch API client ID is mandatory; the channel list defaults to empty.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CLIENT_ID): cv.string,
    vol.Required(CONF_CHANNELS, default=[]):
        vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Twitch platform."""
    from twitch import TwitchClient
    from requests.exceptions import HTTPError

    channel_names = config.get(CONF_CHANNELS, [])
    twitch_client = TwitchClient(client_id=config.get(CONF_CLIENT_ID))

    # A cheap API call to validate the configured client ID up front.
    try:
        twitch_client.ingests.get_server_list()
    except HTTPError:
        _LOGGER.error("Client ID is not valid")
        return

    users = twitch_client.users.translate_usernames_to_ids(channel_names)
    sensors = [TwitchSensor(user, twitch_client) for user in users]
    add_entities(sensors, True)
class TwitchSensor(Entity):
    """Representation of a Twitch channel."""

    def __init__(self, user, client):
        """Initialize the sensor."""
        self._client = client
        self._user = user
        self._channel = user.name
        self._id = user.id
        self._state = STATE_OFFLINE
        self._game = None
        self._title = None
        self._preview = None

    @property
    def should_poll(self):
        """Device should be polled."""
        return True

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._channel

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def entity_picture(self):
        """Return preview of current game."""
        return self._preview

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        if self._state != STATE_STREAMING:
            return None
        return {
            ATTR_GAME: self._game,
            ATTR_TITLE: self._title,
        }

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON

    # pylint: disable=no-member
    def update(self):
        """Update device state."""
        stream = self._client.streams.get_stream_by_user(self._id)
        if not stream:
            # Offline: fall back to the channel logo as the picture.
            self._preview = self._client.users.get_by_id(self._id).get('logo')
            self._state = STATE_OFFLINE
            return
        channel = stream.get('channel')
        self._game = channel.get('game')
        self._title = channel.get('status')
        self._preview = stream.get('preview').get('medium')
        self._state = STATE_STREAMING
|
unknown
|
codeparrot/codeparrot-clean
| ||
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
from __future__ import annotations
from typing import Optional
from typing_extensions import Literal, Required, TypedDict
__all__ = ["ResponseInputFileContentParam"]
class ResponseInputFileContentParam(TypedDict, total=False):
    """A file input to the model."""

    # total=False makes every key optional except those wrapped in Required;
    # only `type` must always be present.
    # NOTE(review): presumably callers supply exactly one of file_data,
    # file_id or file_url — confirm against the OpenAPI spec.
    type: Required[Literal["input_file"]]
    """The type of the input item. Always `input_file`."""

    file_data: Optional[str]
    """The base64-encoded data of the file to be sent to the model."""

    file_id: Optional[str]
    """The ID of the file to be sent to the model."""

    file_url: Optional[str]
    """The URL of the file to be sent to the model."""

    filename: Optional[str]
    """The name of the file to be sent to the model."""
|
python
|
github
|
https://github.com/openai/openai-python
|
src/openai/types/responses/response_input_file_content_param.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from neutron.tests.tempest.api import base
class SubnetsSearchCriteriaTest(base.BaseSearchCriteriaTest):
    """Sorting/pagination search-criteria tests for the subnet resource.

    All assertion logic lives in the ``_test_*`` helpers of
    ``base.BaseSearchCriteriaTest``; this class only binds the resource
    type, the list filter, and the fixture subnets.
    """

    resource = 'subnet'
    # NOTE(review): shared=False presumably keeps shared subnets from other
    # tenants out of the listings — confirm against the base class.
    list_kwargs = {'shared': False}

    @classmethod
    def resource_setup(cls):
        super(SubnetsSearchCriteriaTest, cls).resource_setup()
        # One network holding one subnet per name in cls.resource_names.
        net = cls.create_network(network_name='subnet-search-test-net')
        for name in cls.resource_names:
            cls.create_subnet(net, name=name)

    @test.idempotent_id('d2d61995-5dd5-4b93-bce7-3edefdb79563')
    def test_list_sorts_asc(self):
        self._test_list_sorts_asc()

    @test.idempotent_id('c3c6b0af-c4ac-4da0-b568-8d08ae550604')
    def test_list_sorts_desc(self):
        self._test_list_sorts_desc()

    @test.idempotent_id('b93063b3-f713-406e-bf93-e5738e09153c')
    def test_list_pagination(self):
        self._test_list_pagination()

    @test.idempotent_id('2ddd9aa6-de28-410f-9cbc-ce752893c407')
    def test_list_pagination_with_marker(self):
        self._test_list_pagination_with_marker()

    @test.idempotent_id('351183ef-6ed9-4d71-a9f2-a5ac049bd7ea')
    def test_list_pagination_with_href_links(self):
        self._test_list_pagination_with_href_links()

    @test.idempotent_id('dfaa20ca-6d84-4f26-962f-2fee4d247cd9')
    def test_list_pagination_page_reverse_asc(self):
        self._test_list_pagination_page_reverse_asc()

    @test.idempotent_id('40552213-3e12-4d6a-86f3-dda92f3de88c')
    def test_list_pagination_page_reverse_desc(self):
        self._test_list_pagination_page_reverse_desc()

    @test.idempotent_id('3cea9053-a731-4480-93ee-19b2c28a9ce4')
    def test_list_pagination_page_reverse_with_href_links(self):
        self._test_list_pagination_page_reverse_with_href_links()

    @test.idempotent_id('d851937c-9821-4b46-9d18-43e9077ecac0')
    def test_list_no_pagination_limit_0(self):
        self._test_list_no_pagination_limit_0()

    @test.idempotent_id('c0f9280b-9d81-4728-a967-6be22659d4c8')
    def test_list_validation_filters(self):
        self._test_list_validation_filters()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: sf_volume_manager
short_description: Manage SolidFire volumes
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create, destroy, or update volumes on SolidFire
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
account_id:
description:
- Account ID for the owner of this volume.
required: true
512emulation:
description:
- Should the volume provide 512-byte sector emulation?
- Required when C(state=present)
required: false
qos:
description: Initial quality of service settings for this volume.
required: false
default: None
attributes:
description: A YAML dictionary of attributes that you would like to apply on this volume.
required: false
default: None
volume_id:
description:
- The ID of the volume to manage or update.
- In order to create multiple volumes with the same name, but different volume_ids, please declare the I(volume_id)
parameter with an arbitrary value. However, the specified volume_id will not be assigned to the newly created
volume (since it's an auto-generated property).
required: false
default: None
size:
description:
- The size of the volume in (size_unit).
- Required when C(state = present).
required: false
size_unit:
description:
- The unit used to interpret the size parameter.
required: false
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
access:
required: false
choices: ['readOnly', 'readWrite', 'locked', 'replicationTarget']
description:
- "Access allowed for the volume."
- "readOnly: Only read operations are allowed."
- "readWrite: Reads and writes are allowed."
- "locked: No reads or writes are allowed."
- "replicationTarget: Identify a volume as the target volume for a paired set of volumes. If the volume is not paired, the access status is locked."
- "If unspecified, the access settings of the clone will be the same as the source."
default: None
'''
EXAMPLES = """
- name: Create Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
account_id: 3
enable512e: False
size: 1
size_unit: gb
- name: Update Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: AnsibleVol
account_id: 3
access: readWrite
- name: Delete Volume
sf_volume_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: AnsibleVol
account_id: 2
"""
RETURN = """
msg:
description: Success message
returned: success
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireVolume(object):
    """Ansible module implementation that creates, updates or deletes a
    volume on a NetApp SolidFire cluster, honoring check mode."""

    def __init__(self):
        # Map of size-unit suffixes ('gb', 'tb', ...) to byte multipliers.
        self._size_unit_map = netapp_utils.SF_BYTE_MAP

        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            account_id=dict(required=True, type='int'),
            enable512e=dict(type='bool', aliases=['512emulation']),
            qos=dict(required=False, type='str', default=None),
            attributes=dict(required=False, type='dict', default=None),
            volume_id=dict(type='int', default=None),
            size=dict(type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            access=dict(required=False, type='str', default=None,
                        choices=['readOnly', 'readWrite',
                                 'locked', 'replicationTarget']),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                # size and enable512e are only meaningful when creating
                ('state', 'present', ['size', 'enable512e'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.account_id = p['account_id']
        self.enable512e = p['enable512e']
        self.qos = p['qos']
        self.attributes = p['attributes']
        self.volume_id = p['volume_id']
        self.size_unit = p['size_unit']
        if p['size'] is not None:
            # Normalize the requested size to bytes.
            self.size = p['size'] * self._size_unit_map[self.size_unit]
        else:
            self.size = None
        self.access = p['access']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def get_volume(self):
        """
        Return volume object if found

        :return: Details about the volume. None if not found.
        :rtype: dict
        """
        volume_list = self.sfe.list_volumes_for_account(account_id=self.account_id)
        for volume in volume_list.volumes:
            if volume.name == self.name:
                # Only consider volumes that are not pending deletion
                # (an empty delete_time means the volume is live).
                if self.volume_id is not None:
                    if volume.volume_id == self.volume_id and str(volume.delete_time) == "":
                        return volume
                else:
                    if str(volume.delete_time) == "":
                        # Remember the id so update/delete can target it.
                        self.volume_id = volume.volume_id
                        return volume
        return None

    def create_volume(self):
        """Create the volume; fail the module on any SDK error."""
        try:
            self.sfe.create_volume(name=self.name,
                                   account_id=self.account_id,
                                   total_size=self.size,
                                   enable512e=self.enable512e,
                                   qos=self.qos,
                                   attributes=self.attributes)
        # BUG FIX: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt; catch Exception instead (get_exception()
        # from pycompat24 still works inside this handler).
        except Exception:
            err = get_exception()
            self.module.fail_json(msg="Error provisioning volume %s of size %s" % (self.name, self.size),
                                  exception=str(err))

    def delete_volume(self):
        """Delete the volume identified by ``self.volume_id``."""
        try:
            self.sfe.delete_volume(volume_id=self.volume_id)
        except Exception:  # BUG FIX: was a bare except
            err = get_exception()
            self.module.fail_json(msg="Error deleting volume %s" % self.volume_id,
                                  exception=str(err))

    def update_volume(self):
        """Push all requested attributes to the existing volume."""
        try:
            self.sfe.modify_volume(self.volume_id,
                                   account_id=self.account_id,
                                   access=self.access,
                                   qos=self.qos,
                                   total_size=self.size,
                                   attributes=self.attributes)
        except Exception:  # BUG FIX: was a bare except
            err = get_exception()
            self.module.fail_json(msg="Error updating volume %s" % self.name,
                                  exception=str(err))

    def apply(self):
        """Determine the required action, honor check mode, and execute it."""
        changed = False
        volume_exists = False
        update_volume = False
        volume_detail = self.get_volume()

        if volume_detail:
            volume_exists = True

            if self.state == 'absent':
                # Checking for state change(s) here, and applying it later in
                # the code allows us to support check_mode
                changed = True

            elif self.state == 'present':
                # Only the first detected difference is recorded, but
                # update_volume() always sends every attribute, so all
                # pending differences are applied in one call.
                if volume_detail.access is not None and self.access is not None and volume_detail.access != self.access:
                    update_volume = True
                    changed = True
                elif volume_detail.account_id is not None and self.account_id is not None \
                        and volume_detail.account_id != self.account_id:
                    update_volume = True
                    changed = True
                elif volume_detail.qos is not None and self.qos is not None and volume_detail.qos != self.qos:
                    update_volume = True
                    changed = True
                elif volume_detail.total_size is not None and volume_detail.total_size != self.size:
                    size_difference = abs(float(volume_detail.total_size - self.size))
                    # Resize only when the relative difference exceeds 0.1%
                    # (guards against rounding noise from unit conversion).
                    if size_difference / self.size > 0.001:
                        update_volume = True
                        changed = True
                elif volume_detail.attributes is not None and self.attributes is not None and \
                        volume_detail.attributes != self.attributes:
                    update_volume = True
                    changed = True
        else:
            if self.state == 'present':
                changed = True

        result_message = ""

        if changed:
            if self.module.check_mode:
                result_message = "Check mode, skipping changes"
            else:
                if self.state == 'present':
                    if not volume_exists:
                        self.create_volume()
                        result_message = "Volume created"
                    elif update_volume:
                        self.update_volume()
                        result_message = "Volume updated"
                elif self.state == 'absent':
                    self.delete_volume()
                    result_message = "Volume deleted"

        self.module.exit_json(changed=changed, msg=result_message)
def main():
    """Module entry point: build the volume manager and apply the state."""
    SolidFireVolume().apply()


if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
import random
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    An application component that publishes events with no payload
    and with complex payload every second.
    """

    @inlineCallbacks
    def onJoin(self, details):
        # Invoked by Autobahn once the WAMP session is attached to the realm.
        print("session attached")
        counter = 0
        # Publish forever; ``yield sleep(1)`` hands control back to the
        # Twisted reactor between iterations, so this does not block it.
        while True:
            # Event with no payload.
            print("publish: com.myapp.heartbeat")
            self.publish('com.myapp.heartbeat')
            # Event with positional (int, int) and keyword (c, d) payload.
            obj = {'counter': counter, 'foo': [1, 2, 3]}
            print("publish: com.myapp.topic2", obj)
            self.publish('com.myapp.topic2', random.randint(0, 100), 23,
                         c="Hello", d=obj)
            counter += 1
            yield sleep(1)
if __name__ == '__main__':
    # Router URL is overridable through the AUTOBAHN_DEMO_ROUTER env var;
    # defaults to a local Crossbar instance.
    # NOTE(review): the ``debug_wamp``/``debug`` keyword arguments were
    # dropped in later Autobahn releases — confirm the pinned version
    # still accepts them before upgrading.
    runner = ApplicationRunner(
        environ.get("AUTOBAHN_DEMO_ROUTER", "ws://localhost:8080/ws"),
        u"crossbardemo",
        debug_wamp=False,  # optional; log many WAMP details
        debug=False,  # optional; log even more details
    )
    runner.run(Component)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Django development settings.
"""
from base import *
import os
# --- Debug Settings ---
DEBUG = TEMPLATE_DEBUG = True
# --- /Debug Settings ---
# --- Email Configuration ---
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# --- /Email Configuration ---
# --- Celery Configuration ---
# http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
INSTALLED_APPS += (
"kombu.transport.django",
)
BROKER_URL = "django://"
# --- /Celery Configuration ---
# --- Django Faker Configuration ---
INSTALLED_APPS += (
"django_faker",
)
FAKER_LOCALE = None
FAKER_PROVIDERS = None
# --- /Django Faker Configuration ---
# --- Django-Debug-Toolbar Settings ---
show_toolbar = lambda x: True
INSTALLED_APPS += ("debug_toolbar",)
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'webapp.settings.dev.show_toolbar',
}
# --- /Django-Debug-Toolbar Settings ---
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#Dan Blankenberg
import sys, os
assert sys.version_info[:2] >= ( 2, 4 )
def __main__():
base_dir = os.path.join( os.getcwd(), "bacteria" )
try:
base_dir = sys.argv[1]
except:
print "using default base_dir:", base_dir
loc_out = os.path.join( base_dir, "seq.loc" )
try:
loc_out = os.path.join( base_dir, sys.argv[2] )
except:
print "using default seq.loc:", loc_out
organisms = {}
loc_out = open( loc_out, 'wb' )
for result in os.walk( base_dir ):
this_base_dir, sub_dirs, files = result
for file in files:
if file[-5:] == ".info":
dict = {}
info_file = open( os.path.join( this_base_dir, file ), 'r' )
info = info_file.readlines()
info_file.close()
for line in info:
fields = line.replace( "\n", "" ).split( "=" )
dict[fields[0]]="=".join(fields[1:])
if 'genome project id' in dict.keys():
name = dict['genome project id']
if 'build' in dict.keys():
name = dict['build']
if name not in organisms.keys():
organisms[name] = {'chrs':{}, 'base_dir':this_base_dir}
for key in dict.keys():
organisms[name][key] = dict[key]
else:
if dict['organism'] not in organisms.keys():
organisms[dict['organism']] = {'chrs':{},'base_dir':this_base_dir}
organisms[dict['organism']]['chrs'][dict['chromosome']] = dict
for org in organisms:
org = organisms[org]
try:
build = org['genome project id']
except: continue
if 'build' in org:
build = org['build']
seq_path = os.path.join( org['base_dir'], "seq" )
#create seq dir, if exists go to next org
##TODO: add better checking, i.e. for updating
try:
os.mkdir( seq_path )
except:
print "Skipping", build
#continue
loc_out.write( "seq %s %s\n" % ( build, seq_path ) )
#print org info
for chr in org['chrs']:
chr = org['chrs'][chr]
fasta_file = os.path.join( org['base_dir'], "%s.fna" % chr['chromosome'] )
nib_out_file = os.path.join( seq_path, "%s.nib "% chr['chromosome'] )
#create nibs using faToNib binary
#TODO: when bx supports writing nib, use it here instead
command = "faToNib %s %s" % ( fasta_file, nib_out_file )
os.system( command )
loc_out.close()
if __name__ == "__main__": __main__()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import errno
from collections import deque
from threading import Lock
from jinja2._compat import text_type, string_types, implements_iterator, \
url_quote
# Splits text on runs of whitespace; the capturing group keeps the
# whitespace so the original text can be reassembled by ''.join().
_word_split_re = re.compile(r'(\s+)')

# Splits a token into leading punctuation, the middle, and trailing
# punctuation, used by urlize() below.
# NOTE(review): upstream Jinja2 lists HTML entities ('&lt;', '&gt;') in these
# tuples; the duplicated bare '<' / '>' here look like an artifact of entity
# unescaping during extraction — verify against the upstream source before
# relying on urlize() with escaped input.
_punctuation_re = re.compile(
    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
        '|'.join(map(re.escape, ('(', '<', '<'))),
        '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '>')))
    )
)

# Intentionally simple email matcher — used only to decide whether a token
# should become a mailto: link, not for validation.
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')

# Strips HTML comments and tags.
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')

# Matches HTML entities like &amp; (name captured without & and ;).
_entity_re = re.compile(r'&([^;]+);')

_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'

# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()

# internal code
internal_code = set()

concat = u''.join
def contextfunction(f):
    """Mark a function or method as a *context callable*.

    A context callable receives the active :class:`Context` as its first
    argument when invoked from a template, which lets it inspect the
    context or the functions the context provides.  Example — returning a
    sorted list of the variables the current template exports::

        @contextfunction
        def get_exported_names(context):
            return sorted(context.exported_vars)
    """
    setattr(f, 'contextfunction', True)
    return f
def evalcontextfunction(f):
    """Mark a function or method as an *eval context* callable.

    Like :func:`contextfunction`, but the callable receives an evaluation
    context object instead of the full context.  See :ref:`eval-context`
    for details on the eval context.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfunction', True)
    return f
def environmentfunction(f):
    """Mark a function or method as an *environment* callable.

    Works exactly like :func:`contextfunction`, except the first argument
    passed at call time is the active :class:`Environment` rather than the
    context.
    """
    setattr(f, 'environmentfunction', True)
    return f
def internalcode(f):
    """Marks the function as internally used"""
    # Registering the code object lets traceback rewriting skip frames that
    # belong to Jinja2 internals.
    internal_code.add(f.__code__)
    return f
def is_undefined(obj):
    """Check if the object passed is undefined.  This does nothing more than
    performing an instance check against :class:`Undefined` but looks nicer.
    This can be used for custom filters or tests that want to react to
    undefined variables.  For example a custom default filter can look like
    this::

        def default(var, default=''):
            if is_undefined(var):
                return default
            return var
    """
    # Imported lazily to avoid a circular import with jinja2.runtime.
    from jinja2.runtime import Undefined
    return isinstance(obj, Undefined)
def consume(iterable):
    """Exhaust *iterable* completely, discarding every item."""
    # Feeding a zero-length deque is the documented stdlib recipe for
    # draining an iterator at C speed with no per-item Python work.
    deque(iterable, maxlen=0)
def clear_caches():
    """Jinja2 keeps internal caches for environments and lexers.  These are
    used so that Jinja2 doesn't have to recreate environments and lexers all
    the time.  Normally you don't have to care about that but if you are
    messuring memory consumption you may want to clean the caches.
    """
    # Imported lazily to avoid circular imports at module load time.
    from jinja2.environment import _spontaneous_environments
    from jinja2.lexer import _lexer_cache
    _spontaneous_environments.clear()
    _lexer_cache.clear()
def import_string(import_name, silent=False):
    """Import an object from a string path.

    Useful when import paths act as endpoints or similar.  Both dotted
    notation (``xml.sax.saxutils.escape``) and colon notation
    (``xml.sax.saxutils:escape``) are accepted.

    When *silent* is true, ``None`` is returned instead of raising if the
    import fails.

    :return: imported object
    """
    try:
        if ':' in import_name:
            module_name, _, attr = import_name.partition(':')
        elif '.' in import_name:
            module_name, _, attr = import_name.rpartition('.')
        else:
            # A bare module name: just import and return the module itself.
            return __import__(import_name)
        module = __import__(module_name, None, None, [attr])
        return getattr(module, attr)
    except (ImportError, AttributeError):
        if not silent:
            raise
def open_if_exists(filename, mode='rb'):
    """Open *filename* and return the file object, or ``None`` when the
    path does not exist (or is a directory)."""
    try:
        return open(filename, mode)
    except IOError as err:
        # Missing file / directory hit: treated as "does not exist".
        if err.errno in (errno.ENOENT, errno.EISDIR):
            return None
        # Any other I/O problem (permissions, ...) is a real error.
        raise
def object_type_repr(obj):
    """Return a short description of *obj*'s type for error messages.

    A few recognized singletons (``None``, ``Ellipsis``) are reported by
    name instead of by type.
    """
    if obj is None:
        return 'None'
    if obj is Ellipsis:
        return 'Ellipsis'
    cls = obj.__class__
    # Builtins are shown without their module (py2: __builtin__, py3: builtins).
    if cls.__module__ in ('__builtin__', 'builtins'):
        return '%s object' % cls.__name__
    return '%s.%s object' % (cls.__module__, cls.__name__)
def pformat(obj, verbose=False):
    """Pretty-print *obj* to a string.

    Uses the optional third-party ``pretty`` library when it is importable,
    otherwise falls back to the stdlib :mod:`pprint`.
    """
    try:
        from pretty import pretty
    except ImportError:
        from pprint import pformat as _pformat
        return _pformat(obj)
    return pretty(obj, verbose=verbose)
def urlize(text, trim_url_limit=None, nofollow=False, target=None):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If target is not None, a target attribute will be added to the link.
    """
    # Shortens the *display* text to ``limit`` chars, appending '...' when
    # trimmed; the href always keeps the full URL.
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
                         and (x[:limit] + (len(x) >=limit and '...'
                                           or '')) or x
    # HTML-escape first, then split on whitespace so each token can be
    # examined independently and the text reassembled by join() below.
    words = _word_split_re.split(text_type(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    if target is not None and isinstance(target, string_types):
        target_attr = ' target="%s"' % target
    else:
        target_attr = ''
    for i, word in enumerate(words):
        # Peel leading/trailing punctuation off the candidate token.
        match = _punctuation_re.match(word)
        if match:
            lead, middle, trail = match.groups()
            # Scheme-less domain (www.* or something ending in .org/.net/.com
            # that is not an email): link it with an http:// href.
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                not middle.startswith('https://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
                    nofollow_attr, target_attr, trim_url(middle))
            # Explicit http(s) URL: link it as-is.
            if middle.startswith('http://') or \
               middle.startswith('https://'):
                middle = '<a href="%s"%s%s>%s</a>' % (middle,
                    nofollow_attr, target_attr, trim_url(middle))
            # Email address (no scheme/colon): turn into a mailto: link.
            if '@' in middle and not middle.startswith('www.') and \
               not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            # Only rewrite the word if anything actually changed.
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """Generate some lorem impsum for the template.

    Produces *n* paragraphs of ``min``..``max`` random words each; returns
    HTML ``<p>`` markup by default, plain text when ``html`` is false.
    Output is random (module-level ``random`` state), not deterministic.
    """
    from jinja2.constants import LOREM_IPSUM_WORDS
    from random import choice, randrange
    words = LOREM_IPSUM_WORDS.split()
    result = []
    for _ in range(n):
        # Per-paragraph state: capitalization flag and positions of the
        # last inserted comma / full stop.
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []
        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(range(randrange(min, max))):
            # Pick a word, avoiding an immediate repeat of the previous one.
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas (roughly every 3-8 words)
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ','
            # add end of sentences (roughly every 10-20 words)
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += '.'
                next_capitalized = True
            p.append(word)
        # ensure that the paragraph ends with a dot.
        p = u' '.join(p)
        if p.endswith(','):
            p = p[:-1] + '.'
        elif not p.endswith('.'):
            p += '.'
        result.append(p)
    if not html:
        return u'\n\n'.join(result)
    # Escape each paragraph and wrap in <p>; Markup marks the result as safe.
    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8'):
    """URL-quote a single string in a way that is safe on every supported
    Python version.

    Non-string objects are first converted to their unicode representation;
    text is encoded with *charset* before quoting, and the result is
    returned as text.
    """
    if isinstance(obj, string_types):
        value = obj
    else:
        # Not a string at all: use its unicode representation.
        value = text_type(obj)
    if isinstance(value, text_type):
        # Quote operates on bytes; encode text first.
        value = value.encode(charset)
    return text_type(url_quote(value))
class LRUCache(object):
    """A simple LRU Cache implementation."""

    # this is fast for small capacities (something below 1000) but doesn't
    # scale.  But as long as it's only used as storage for templates this
    # won't do any harm.

    def __init__(self, capacity):
        # capacity: maximum number of entries before the least recently
        # used one is evicted.
        self.capacity = capacity
        self._mapping = {}
        # Keys ordered oldest (left end) -> most recently used (right end);
        # this deque defines the eviction order.
        self._queue = deque()
        self._postinit()

    def _postinit(self):
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        # Guards all mutating operations; reads via __getitem__ also lock
        # because a hit reorders the queue.
        self._wlock = Lock()
        self._append = self._queue.append

    def __getstate__(self):
        # Pickle only the logical state; method aliases and the lock are
        # rebuilt by _postinit() on unpickling.
        return {
            'capacity': self.capacity,
            '_mapping': self._mapping,
            '_queue': self._queue
        }

    def __setstate__(self, d):
        self.__dict__.update(d)
        self._postinit()

    def __getnewargs__(self):
        return (self.capacity,)

    def copy(self):
        """Return a shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        # NOTE(review): rebinding rv._queue leaves the aliases created in
        # rv._postinit() (_append, _remove, ...) bound to the *old* deque —
        # looks like a latent bug; verify against upstream Jinja2 history.
        rv._queue = deque(self._queue)
        return rv

    def get(self, key, default=None):
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default=None):
        """Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        self._wlock.acquire()
        try:
            try:
                return self[key]
            except KeyError:
                self[key] = default
                return default
        finally:
            self._wlock.release()

    def clear(self):
        """Clear the cache."""
        self._wlock.acquire()
        try:
            self._mapping.clear()
            self._queue.clear()
        finally:
            self._wlock.release()

    def __contains__(self, key):
        """Check if a key exists in this cache."""
        return key in self._mapping

    def __len__(self):
        """Return the current size of the cache."""
        return len(self._mapping)

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self._mapping
        )

    def __getitem__(self, key):
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            rv = self._mapping[key]
            # Promote the key to most-recently-used unless it already is.
            if self._queue[-1] != key:
                try:
                    self._remove(key)
                except ValueError:
                    # if something removed the key from the container
                    # when we read, ignore the ValueError that we would
                    # get otherwise.
                    pass
                self._append(key)
            return rv
        finally:
            self._wlock.release()

    def __setitem__(self, key, value):
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        self._wlock.acquire()
        try:
            if key in self._mapping:
                self._remove(key)
            elif len(self._mapping) == self.capacity:
                # Cache full: evict the least recently used key (left end).
                del self._mapping[self._popleft()]
            self._append(key)
            self._mapping[key] = value
        finally:
            self._wlock.release()

    def __delitem__(self, key):
        """Remove an item from the cache dict.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            del self._mapping[key]
            try:
                self._remove(key)
            except ValueError:
                # __getitem__ is not locked, it might happen
                pass
        finally:
            self._wlock.release()

    def items(self):
        """Return a list of items."""
        # Most recently used first (queue is oldest-first, hence reverse).
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result

    def iteritems(self):
        """Iterate over all items."""
        return iter(self.items())

    def values(self):
        """Return a list of all values."""
        return [x[1] for x in self.items()]

    def itervalue(self):
        """Iterate over all values."""
        # NOTE: name kept as-is ("itervalue", no trailing s) — it is part of
        # the public interface.
        return iter(self.values())

    def keys(self):
        """Return a list of all keys ordered by most recent usage."""
        return list(self)

    def iterkeys(self):
        """Iterate over all keys in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(tuple(self._queue))

    __iter__ = iterkeys

    def __reversed__(self):
        """Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))

    __copy__ = copy
# Register the LRU cache as a mutable mapping if possible.
# FIX: ``MutableMapping`` has lived in ``collections.abc`` since Python 3.3
# and was *removed* from ``collections`` in Python 3.10, so the original
# ``from collections import MutableMapping`` silently skipped registration
# on modern interpreters.  Try the abc location first, then fall back.
try:
    try:
        from collections.abc import MutableMapping
    except ImportError:
        from collections import MutableMapping
    MutableMapping.register(LRUCache)
except ImportError:
    pass
@implements_iterator
class Cycler(object):
    """A cycle helper for templates."""

    def __init__(self, *items):
        if not items:
            raise RuntimeError('at least one item has to be provided')
        self.items = items
        self.reset()

    def reset(self):
        """Start the cycle over from the first item."""
        self.pos = 0

    @property
    def current(self):
        """The item the cycle is currently pointing at."""
        return self.items[self.pos]

    def __next__(self):
        """Return the current item, then advance, wrapping at the end."""
        item = self.items[self.pos]
        self.pos = (self.pos + 1) % len(self.items)
        return item
class Joiner(object):
    """A joining helper for templates.

    The first call returns an empty string; every later call returns the
    separator — handy for emitting delimiters inside template loops.
    """

    def __init__(self, sep=u', '):
        self.sep = sep
        self.used = False

    def __call__(self):
        if self.used:
            return self.sep
        # First invocation: remember it and emit nothing.
        self.used = True
        return u''
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 2017 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockito.internal.junit;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertEquals;
import static org.mockito.internal.stubbing.answers.DoesNothing.doesNothing;
import java.util.Arrays;
import java.util.Collections;
import org.junit.Test;
import org.mockito.internal.invocation.InvocationBuilder;
import org.mockito.internal.stubbing.StubbedInvocationMatcher;
import org.mockito.internal.util.SimpleMockitoLogger;
import org.mockito.stubbing.Stubbing;
import org.mockitoutil.TestBase;
/**
 * Unit tests for {@code UnusedStubbings}: verifies how unused stubbings are
 * formatted into the Mockito hint log.
 */
public class UnusedStubbingsTest extends TestBase {

    private SimpleMockitoLogger logger = new SimpleMockitoLogger();

    @Test
    public void no_unused_stubbings() throws Exception {
        // given: no stubbings at all
        UnusedStubbings stubbings = new UnusedStubbings(Collections.<Stubbing>emptyList());

        // when
        stubbings.format("MyTest.myTestMethod", logger);

        // then: nothing to report, so nothing is logged
        assertEquals("", logger.getLoggedInfo());
    }

    @Test
    public void unused_stubbings() throws Exception {
        // given: two stubbings that are never realized
        UnusedStubbings stubbings =
                new UnusedStubbings(
                        Arrays.asList(
                                new StubbedInvocationMatcher(
                                        doesNothing(),
                                        new InvocationBuilder().toInvocationMatcher(),
                                        null),
                                new StubbedInvocationMatcher(
                                        doesNothing(),
                                        new InvocationBuilder().toInvocationMatcher(),
                                        null)));

        // when
        stubbings.format("MyTest.myTestMethod", logger);

        // then: the reflective stack frame in the hint differs by JDK, so
        // accept any of the known layouts (Java <9, Java 9+, Java 21+)
        assertThat(filterLineNo(logger.getLoggedInfo()))
                .isIn(
                        "[MockitoHint] MyTest.myTestMethod (see javadoc for MockitoHint):\n"
                                + // Java <9
                                "[MockitoHint] 1. Unused -> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n"
                                + "[MockitoHint] 2. Unused -> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n",
                        "[MockitoHint] MyTest.myTestMethod (see javadoc for MockitoHint):\n"
                                + // Java 9
                                "[MockitoHint] 1. Unused -> at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n"
                                + "[MockitoHint] 2. Unused -> at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\n",
                        "[MockitoHint] MyTest.myTestMethod (see javadoc for MockitoHint):\n"
                                + // Java 21
                                "[MockitoHint] 1. Unused -> at java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:0)\n"
                                + "[MockitoHint] 2. Unused -> at java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:0)\n");
    }
}
|
java
|
github
|
https://github.com/mockito/mockito
|
mockito-core/src/test/java/org/mockito/internal/junit/UnusedStubbingsTest.java
|
"""Tests for return_direct tool graph structure."""
from langchain_core.tools import tool
from syrupy.assertion import SnapshotAssertion
from langchain.agents.factory import create_agent
from tests.unit_tests.agents.model import FakeToolCallingModel
def test_agent_graph_without_return_direct_tools(snapshot: SnapshotAssertion) -> None:
    """Test that graph WITHOUT return_direct tools does NOT have edge from tools to end."""

    @tool
    def normal_tool(input_string: str) -> str:
        """A normal tool without return_direct."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[normal_tool],
        system_prompt="You are a helpful assistant.",
    )

    # With no return_direct=True tool, the rendered mermaid diagram must not
    # contain a tools -> __end__ edge; the snapshot pins that structure.
    rendered_diagram = agent.get_graph().draw_mermaid()
    assert rendered_diagram == snapshot
def test_agent_graph_with_return_direct_tool(snapshot: SnapshotAssertion) -> None:
    """Test that graph WITH return_direct tools has correct edge from tools to end."""

    @tool(return_direct=True)
    def return_direct_tool(input_string: str) -> str:
        """A tool with return_direct=True."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[return_direct_tool],
        system_prompt="You are a helpful assistant.",
    )

    # With a return_direct=True tool present, the rendered mermaid diagram
    # must contain a tools -> __end__ edge; the snapshot pins that structure.
    rendered_diagram = agent.get_graph().draw_mermaid()
    assert rendered_diagram == snapshot
def test_agent_graph_with_mixed_tools(snapshot: SnapshotAssertion) -> None:
    """Test that graph with mixed tools (some return_direct, some not) has correct edges."""

    @tool(return_direct=True)
    def return_direct_tool(input_string: str) -> str:
        """A tool with return_direct=True."""
        return input_string

    @tool
    def normal_tool(input_string: str) -> str:
        """A normal tool without return_direct."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[return_direct_tool, normal_tool],
        system_prompt="You are a helpful assistant.",
    )

    # A single return_direct=True tool is enough: the rendered mermaid
    # diagram must contain a tools -> __end__ edge even though another tool
    # is a plain one; the snapshot pins that structure.
    rendered_diagram = agent.get_graph().draw_mermaid()
    assert rendered_diagram == snapshot
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain_v1/tests/unit_tests/agents/test_return_direct_graph.py
|
from unittest import TestCase, main
from os.path import join
from os import remove, rename
from moi.group import get_id_from_user
from moi import ctx_default
from qiita_core.util import qiita_test_checker
from qiita_db.analysis import Analysis
from qiita_db.job import Job
from qiita_db.util import get_db_files_base_dir
from qiita_ware.analysis_pipeline import RunAnalysis
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
@qiita_test_checker()
class TestRun(TestCase):
    """Integration tests for the RunAnalysis pipeline."""

    def setUp(self):
        # Files queued here are removed in tearDown.
        self._del_files = []

    def tearDown(self):
        for delfile in self._del_files:
            remove(delfile)

    def test_failure_callback(self):
        """Make sure failure at file creation step doesn't hang everything"""
        # rename a needed file for creating the biom table
        base = get_db_files_base_dir()
        rename(join(base, "processed_data",
                    "1_study_1001_closed_reference_otu_table.biom"),
               join(base, "processed_data", "1_study_1001.bak"))
        analysis = Analysis(2)
        group = get_id_from_user("demo@microbio.me")
        try:
            app = RunAnalysis(moi_context=ctx_default,
                              moi_parent_id=group)
            app(analysis, [], rarefaction_depth=100)
            # The missing input must flip the analysis and every one of its
            # jobs into the 'error' state rather than hanging the pipeline.
            self.assertEqual(analysis.status, 'error')
            for job_id in analysis.jobs:
                self.assertEqual(Job(job_id).status, 'error')
        finally:
            # restore the renamed file so later tests see an intact DB tree
            rename(join(base, "processed_data", "1_study_1001.bak"),
                   join(base, "processed_data",
                        "1_study_1001_closed_reference_otu_table.biom"))

    def test_add_jobs_in_construct_job_graphs(self):
        # Building the job graph should append the new jobs to the analysis
        # and record the merged (common + per-command) options on the job.
        analysis = Analysis(2)
        RunAnalysis()._construct_job_graph(
            analysis, [('18S', 'Summarize Taxa')],
            comm_opts={'Summarize Taxa': {'opt1': 5}})
        self.assertEqual(analysis.jobs, [3, 4])
        job = Job(4)
        self.assertEqual(job.datatype, '18S')
        self.assertEqual(job.command,
                         ['Summarize Taxa', 'summarize_taxa_through_plots.py'])
        expopts = {
            '--output_dir': join(
                get_db_files_base_dir(), 'job',
                '4_summarize_taxa_through_plots.py_output_dir'),
            'opt1': 5}
        self.assertEqual(job.options, expopts)


if __name__ == "__main__":
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
    """Hashing of dtypes built from scalar type objects."""

    def test_run(self):
        """Only test hash runs at all."""
        # FIX: ``np.int``/``np.float``/``np.complex``/``np.str``/``np.object``/
        # ``np.unicode`` were mere aliases for the builtins, deprecated in
        # NumPy 1.20 and removed in 1.24 — use the builtins directly.
        # (``str`` covers the former ``np.unicode`` case on Python 3.)
        for t in [int, float, complex, np.int32, str, object]:
            dt = np.dtype(t)
            hash(dt)
class TestRecord(TestCase):
    """Hashing behaviour of record (structured) dtypes."""

    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        # FIX throughout this class: ``np.int`` (alias for builtin int) was
        # removed in NumPy 1.24 — use ``int`` directly.
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        self.assertTrue(hash(a) == hash(b),
                        "two equivalent types do not hash to the same value !")

    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', int)])
        b = np.dtype([('ye', int)])
        self.assertTrue(hash(a) != hash(b),
                        "%s and %s hash the same !" % (a, b))

    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        self.assertTrue(hash(a) != hash(b),
                        "%s and %s hash the same !" % (a, b))

    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        self.assertRaises(TypeError, np.dtype,
                          dict(names=set(['A', 'B']), formats=['f8', 'i4']))
        self.assertRaises(TypeError, np.dtype,
                          dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
class TestSubarray(TestCase):
    """Hashing behaviour of subarray dtypes (base type + shape).

    ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is equivalent.
    """

    def test_single_subarray(self):
        # A bare int shape and the equivalent 1-tuple must hash the same.
        a = np.dtype((int, (2)))
        b = np.dtype((int, (2,)))
        self.assertTrue(hash(a) == hash(b),
                        "two equivalent types do not hash to the same value !")

    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 3)))
        self.assertTrue(hash(a) == hash(b),
                        "two equivalent types do not hash to the same value !")

    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (3, 2)))
        self.assertTrue(hash(a) != hash(b),
                        "%s and %s hash the same !" % (a, b))
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 2)))
        self.assertTrue(hash(a) != hash(b),
                        "%s and %s hash the same !" % (a, b))
        a = np.dtype((int, (1, 2, 3)))
        b = np.dtype((int, (1, 2)))
        self.assertTrue(hash(a) != hash(b),
                        "%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
    """Test deeply nested subtypes.

    ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is equivalent.
    """

    def test1(self):
        simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                            'titles': ['Red pixel', 'Blue pixel']})
        a = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        b = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((int, (3, 2))))])
        self.assertTrue(hash(a) == hash(b))
        # Nest the record dtype itself as a subarray base type.
        c = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        d = np.dtype([('yo', int), ('ye', simple1),
                      ('yi', np.dtype((a, (3, 2))))])
        self.assertTrue(hash(c) == hash(d))
class TestBasicFunctions(TestCase):
    """Basic dtype comparison and sequence-protocol behaviour."""

    def test_compare(self):
        """Equality/inequality of scalar and structured dtypes."""
        a = np.dtype('i')
        b = np.dtype('i')
        self.assertTrue(a == b)
        a = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
        b = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
        c = np.dtype([('two', np.dtype('i')), ('one', np.dtype('d'))])
        self.assertTrue(a == a)
        self.assertTrue(a == b)
        self.assertFalse(b == c)
        self.assertFalse(a != a)
        self.assertFalse(a != b)
        self.assertTrue(b != c)
        # Try using the repeat operation and make sure the base is correct.
        c = b * 3
        self.assertFalse(c == b)
        self.assertTrue(c.base == b)

    def test_seq(self):
        """Field access by index and by name, including the error cases."""
        a = np.dtype([('one', np.dtype('d')), ('two', np.dtype('i'))])
        self.assertTrue(a[0] == np.dtype('d'))
        self.assertTrue(a['two'] == np.dtype('i'))
        self.assertFalse(a['two'] == np.dtype('d'))
        # BUG FIX: the previous ``try: a[2]; assertTrue(False) except: pass``
        # idiom swallowed its own AssertionError with the bare except, so
        # these error-path checks could never fail.  assertRaises verifies
        # the exception properly.  A tuple is accepted because the exact
        # exception type for an out-of-range field index may vary between
        # NumPy versions -- TODO confirm against the pinned NumPy.
        self.assertRaises((IndexError, KeyError), lambda: a[2])
        self.assertRaises(KeyError, lambda: a['foo'])
        # Make sure scalar int values work as index values.
        arr = np.arange(4)
        self.assertTrue(a[arr[0]] == np.dtype('d'))
        self.assertTrue(a[arr[1]] == np.dtype('i'))
        self.assertRaises((IndexError, KeyError), lambda: a[arr[2]])
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    from unittest import main
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <lmacken@redhat.com>
import os
import moksha.common.utils
import logging
import pkg_resources
import warnings
import types
from collections import defaultdict
from paste.deploy.converters import asbool
from inspect import isclass
from sqlalchemy import create_engine
from moksha.common.exc import MokshaException
from moksha.common.lib.helpers import get_moksha_config_path
from moksha.common.lib.helpers import appconfig
log = logging.getLogger(__name__)
# A list of all the entry points
APPS = 'moksha.application'
WIDGETS = 'moksha.widget'
ROOT = 'moksha.root'
MENUS = 'moksha.menu'
class MokshaMiddleware(object):
    """
    A layer of WSGI middleware that is responsible for setting up the moksha
    environment, as well as handling every request/response in the application.

    If a request for an application comes in (/apps/$NAME), it will dispatch to
    the WSGI Application or RootController of that application as defined in
    it's egg-info entry-points.

    This middleware also sets up the `moksha.livewidgets` StackedObjectProxy,
    which acts as a registry for Moksha LiveWidget topic callbacks.
    """
    def __init__(self, application, config):
        """Wrap *application* and eagerly load every moksha component.

        :param application: the downstream WSGI application to wrap.
        :param config: the (dict-like) application configuration.
        """
        log.info('Creating Moksha Middleware')
        self.application = application
        self.config = config
        moksha.common.utils._apps = {}
        moksha.common.utils._widgets = {}  # {'widget name': tw.api.Widget}
        moksha.common.utils.menus = {}  # {'menu name': moksha.api.menus.MokshaMenu}
        self.engines = {}  # {'app name': sqlalchemy.engine.base.Engine}
        # Order matters: paths and configs must be known before widgets,
        # applications and models are loaded.
        self.load_paths()
        self.load_configs()
        self.load_widgets()
        self.load_applications()
        self.load_models()
        self.load_menus()
        self.load_root()

    def __call__(self, environ, start_response):
        """WSGI entry point: register per-request state, then delegate."""
        self.register_livewidgets(environ)
        return self.application(environ, start_response)

    def register_livewidgets(self, environ):
        """ Register the `moksha.livewidgets` dictionary.

        This is a per-request StackedObjectProxy that is used by the
        LiveWidgets to register their own topic callbacks. The Moksha Live
        Socket then handles subscribing widgets to their appropriate topics,
        decoding the incoming JSON data, and dispatching messages to them as
        they arrive.
        """
        # NOTE(review): moksha.wsgi.lib.utils is not imported at the top of
        # this module -- presumably it is reachable through the `moksha`
        # namespace package at runtime; verify.
        environ['paste.registry'].register(moksha.wsgi.lib.utils.livewidgets, {
            'onopen': [],
            'onclose': [],
            'onerror': [],
            'onerrorframe': [],
            'onconnectedframe': [],
            'onmessageframe': defaultdict(list)  # {topic: [js_callback,]}
        })

    def load_paths(self):
        """ Load the names and paths of all moksha applications and widgets.

        We must do this before actually loading the widgets or applications, to
        ensure that we parse and load each of their configuration files
        beforehand.
        """
        for app_entry in pkg_resources.iter_entry_points(APPS):
            if app_entry.name in moksha.common.utils._apps:
                raise MokshaException('Duplicate application name: %s' %
                                      app_entry.name)
            app_path = app_entry.dist.location
            moksha.common.utils._apps[app_entry.name] = {
                'name': app_entry.name,
                'project_name': app_entry.dist.project_name,
                'path': app_path,
            }
        for widget_entry in pkg_resources.iter_entry_points(WIDGETS):
            if widget_entry.name in moksha.common.utils._widgets:
                raise MokshaException('Duplicate widget name: %s' %
                                      widget_entry.name)
            widget_path = widget_entry.dist.location
            moksha.common.utils._widgets[widget_entry.name] = {
                'name': widget_entry.name,
                'project_name': widget_entry.dist.project_name,
                'path': widget_path,
            }

    def load_applications(self):
        """Instantiate every [moksha.application] entry point and, when
        present, import its sibling ``model`` module."""
        log.info('Loading moksha applications')
        for app_entry in pkg_resources.iter_entry_points(APPS):
            log.info('Loading %s application' % app_entry.name)
            app_class = app_entry.load()
            app_path = app_entry.dist.location
            app_name = getattr(app_class, 'name', app_entry.name)
            if isclass(app_class):
                app_class = app_class()
            moksha.common.utils._apps[app_entry.name].update({
                'name': app_name,
                'controller': app_class,
                'path': app_path,
                'model': None,
            })
            try:
                # Try to import the 'model' module alongside its 'controllers'
                module = '.'.join(app_class.__module__.split('.')[:-2] +
                                  ['model'])
                model = __import__(module, globals(), locals(),
                                   [app_entry.name])
                moksha.common.utils._apps[app_entry.name]['model'] = model
            except ImportError:
                # Py3 fix: this was ``except ImportError, e:`` (Python 2 only
                # syntax); the bound exception was never used.
                log.debug("Cannot find application model: %r" % module)

    def load_widgets(self):
        """ Load widgets from entry points. """
        log.info('Loading moksha widgets')
        import tw2.core.widgets
        from moksha.wsgi.widgets.api.live import LiveWidgetMeta

        def is_live(widget):
            # A widget is "live" when its class was built by LiveWidgetMeta.
            return isinstance(widget, LiveWidgetMeta)

        for widget_entry in pkg_resources.iter_entry_points(WIDGETS):
            log.info('Loading %s widget' % widget_entry.name)
            widget_class = widget_entry.load()
            if isinstance(widget_class, types.FunctionType):
                # Entry point may be a factory function taking the config.
                widget_class = widget_class(config=self.config)
            widget_path = widget_entry.dist.location
            moksha.common.utils._widgets[widget_entry.name] = {
                'name': getattr(widget_class, 'name', widget_entry.name),
                'widget': widget_class,
                'path': widget_path,
                'live': is_live(widget_class),
            }

    def load_menus(self):
        """Instantiate every [moksha.menu] entry point."""
        log.info('Loading moksha menus')
        for menu_entry in pkg_resources.iter_entry_points(MENUS):
            log.info('Loading %s menu' % menu_entry.name)
            menu_class = menu_entry.load()
            menu_path = menu_entry.dist.location
            moksha.common.utils.menus[menu_entry.name] = menu_class(menu_entry.name)

    def load_configs(self):
        """ Load the configuration files for all applications.

        Here we iterate over all applications, loading their configuration
        files and merging their [DEFAULT] configuration into ours. This
        requires that applications do not have conflicting configuration
        variable names. To mitigate this, applications should use some basic
        variable namespacing, such as `myapp.myvariable = myvalue`.

        We first make sure to load up Moksha's configuration, for the cases
        where it is being run as WSGI middleware in a different environment.
        """
        apps = []
        loaded_configs = []
        conf_d = '/etc/moksha/conf.d/%s/'
        moksha_config_path = get_moksha_config_path()
        if moksha_config_path:
            moksha_config_path = os.path.dirname(moksha_config_path)
            apps = [{'path': moksha_config_path}]
        apps += moksha.common.utils._apps.values()
        for app in apps:
            # production.ini wins over development.ini when both exist.
            for configfile in ('production.ini', 'development.ini'):
                for path in (app['path'], conf_d % app.get('project_name')):
                    confpath = os.path.join(path, configfile)
                    if os.path.exists(confpath):
                        conf = appconfig('config:' + confpath)
                        if app.get('name'):
                            moksha.common.utils._apps[app['name']]['config'] = conf
                        if confpath in loaded_configs:
                            continue
                        log.info('Loading configuration: %s' % confpath)
                        # This is leftover from the days of using paste.deploy.appconfig. Is anything
                        # using this?
                        # for entry in conf.global_conf:
                        #    if entry.startswith('_'):
                        #        continue
                        #    if entry in config:
                        #        log.warning('Conflicting variable: %s' % entry)
                        #        continue
                        #    else:
                        #        config[entry] = conf.global_conf[entry]
                        #        log.debug('Set `%s` in global config' % entry)
                        loaded_configs.append(confpath)
                        break

    def load_models(self):
        """ Setup the SQLAlchemy database models for all moksha applications.

        This method first looks to see if your application has a
        ``sqlalchemy.url`` set in it's configuration file, and will create a
        SQLAlchemy engine with it. If it does not exist, Moksha will create an
        engine for your application based on the ``app_db`` configuration,
        which defaults to ``sqlite:///$APPNAME.db``.

        It will then bind the engine to your model's
        :class:`sqlalchemy.MetaData`, and initialize all of your tables,
        if they don't already exist.
        """
        for name, app in moksha.common.utils._apps.items():
            sa_url = app.get('config', {}).get('sqlalchemy.url', None)
            app_db = self.config.get('app_db', 'sqlite:///%s.db')
            if sa_url:
                if app['config']['__file__'] == get_moksha_config_path():
                    # Moksha's apps don't specify their own SA url
                    self.engines[name] = create_engine(app_db % name)
                else:
                    # App has specified its own engine url
                    self.engines[name] = create_engine(sa_url)
            # If a `model` module exists in the application, call it's
            # `init_model` method,and bind the engine to it's `metadata`.
            if app.get('model'):
                if not sa_url:
                    self.engines[name] = create_engine(app_db % name)
                log.debug('Creating database engine for %s' % app['name'])
                app['model'].init_model(self.engines[name])
                app['model'].metadata.create_all(bind=self.engines[name])

    def load_root(self):
        """ Load the root controller.

        This allows developers to configure Moksha to directly hit their
        TurboGears controller or WSGI app. You can also have the root of your
        website be a single widget.

        This is an example entry-point in your setup.py/pavement.py::

            [moksha.root]
            root = myproject.controllers.root:RootController
        """
        for root_entry in pkg_resources.iter_entry_points(ROOT):
            log.info('Loading the root of the project: %r' %
                     root_entry.dist.project_name)
            if root_entry.name == 'root':
                root_class = root_entry.load()
                moksha.common.utils.root = root_class
                # TODO: support setting a root widget
                #if issubclass(root_class, Widget):
                #    widget = root_class(root_class.__name__)
                #    moksha.common.utils._widgets[root_entry.name] = {
                #        'name': getattr(root_class, 'name', widget_entry.name),
                #        'widget': widget,
                #        'path': root_entry.dist.location,
                #        }
                # TODO: handle root wsgi apps
            else:
                # Bug fix: the %r placeholder previously had no argument, so
                # the literal format string was logged.  Use lazy logging
                # args to report the offending entry-point name.
                log.error('Ignoring [moksha.root] entry %r', root_entry.name)
                log.error('Please expose at most 1 object on this entry-point,'
                          ' named "root".')
def make_moksha_middleware(app, config):
    """Wrap *app* in the Moksha middleware stack, honouring *config* toggles.

    Wrapping order matters: extension points sit outside MokshaMiddleware,
    and the paste registry wraps everything.
    """
    def enabled(key, default):
        # Config values may be strings ("true"/"false"); normalize to bool.
        return asbool(config.get(key, default))

    if enabled('moksha.connectors', False):
        raise NotImplementedError(
            "moksha.connectors has moved to fedora-community"
        )

    if enabled('moksha.extensionpoints', True):
        from moksha.wsgi.middleware import MokshaExtensionPointMiddleware
        app = MokshaExtensionPointMiddleware(app, config)

    app = MokshaMiddleware(app, config)

    if enabled('moksha.csrf_protection', False):
        raise NotImplementedError(
            "moksha.csrf_protection has been moved to python-fedora")

    if enabled('moksha.registry', True):
        from paste.registry import RegistryManager
        app = RegistryManager(app)

    return app
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.util.CleanerUtil;
import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Helper routines shared by the crypto streams: direct-buffer release,
 * buffer sizing, codec validation and stream-offset lookup.
 */
@InterfaceAudience.Private
public class CryptoStreamUtils {
  private static final int MIN_BUFFER_SIZE = 512;
  private static final Logger LOG =
      LoggerFactory.getLogger(CryptoStreamUtils.class);

  /**
   * Forcibly free the direct buffer.
   *
   * @param buffer buffer.
   */
  public static void freeDB(ByteBuffer buffer) {
    // Guard clause: if unmapping is unavailable there is nothing to free.
    if (!CleanerUtil.UNMAP_SUPPORTED) {
      LOG.trace(CleanerUtil.UNMAP_NOT_SUPPORTED_REASON);
      return;
    }
    try {
      CleanerUtil.getCleaner().freeBuffer(buffer);
    } catch (IOException e) {
      LOG.info("Failed to free the buffer", e);
    }
  }

  /**
   * Read crypto buffer size.
   *
   * @param conf configuration.
   * @return hadoop.security.crypto.buffer.size.
   */
  public static int getBufferSize(Configuration conf) {
    return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY,
        HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT);
  }

  /**
   * AES/CTR/NoPadding or SM4/CTR/NoPadding is required.
   *
   * @param codec crypto codec.
   */
  public static void checkCodec(CryptoCodec codec) {
    CipherSuite suite = codec.getCipherSuite();
    boolean supported = suite == CipherSuite.AES_CTR_NOPADDING
        || suite == CipherSuite.SM4_CTR_NOPADDING;
    if (!supported) {
      throw new UnsupportedCodecException(
          "AES/CTR/NoPadding or SM4/CTR/NoPadding is required");
    }
  }

  /**
   * Check and floor buffer size.
   *
   * @param codec crypto codec.
   * @param bufferSize the size of the buffer to be used.
   * @return calc buffer size.
   */
  public static int checkBufferSize(CryptoCodec codec, int bufferSize) {
    Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE,
        "Minimum value of buffer size is " + MIN_BUFFER_SIZE + ".");
    // Round down to a whole number of cipher blocks.
    int blockSize = codec.getCipherSuite().getAlgorithmBlockSize();
    return bufferSize - (bufferSize % blockSize);
  }

  /**
   * If input stream is {@link org.apache.hadoop.fs.Seekable}, return it's
   * current position, otherwise return 0;
   *
   * @param in wrapper.
   * @return current position, otherwise return 0.
   * @throws IOException raised on errors performing I/O.
   */
  public static long getInputStreamOffset(InputStream in) throws IOException {
    return (in instanceof Seekable) ? ((Seekable) in).getPos() : 0;
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
from odoo.tools import populate, pycompat
class TestPopulateModel(models.Model):
    """Model exercising the populate framework: cartesian/iterate/compute
    factories, cross-dependent fields, and a dependency on another model."""
    _name = 'test.populate'
    _description = 'Test Populate'

    name = fields.Char(default='Foo')
    state = fields.Selection([('a', 'A'), ('b', 'B')], default='a')
    active = fields.Boolean('Active', default=True)
    category_id = fields.Many2one('test.populate.category', 'Category')
    some_ref = fields.Integer('Reference')
    dependant_field_1 = fields.Char('Dependant 1')
    dependant_field_2 = fields.Char('Dependant 2')
    sequence = fields.Integer("Sequence")

    # Categories must be populated first so category_id can reference them.
    _populate_dependencies = ['test.populate.category']
    _populate_sizes = {
        'small': 20,
        'medium': 30,
        'large': 100,
    }

    def _populate_factories(self):
        # cross dependant field in a sub generator, cartesian product of two fields
        dependant_factories = [
            ('dependant_field_1', populate.cartesian(['d1_1', 'd1_2'])),
            ('dependant_field_2', populate.cartesian(['d2_1', 'd2_2', 'd2_3_{counter}'])),
        ]

        def generate_dependant(iterator, *args):
            # Merge the nested chain's values into the outer chain's values;
            # '__complete' is True only when both chains are past their
            # cartesian corner cases.
            dependants_generator = populate.chain_factories(dependant_factories, self._name)
            for values in dependants_generator:
                dependant_values = next(iterator)
                yield {**values, **dependant_values, '__complete': values['__complete'] and dependant_values['__complete']}

        def get_name(values=None, counter=0, **kwargs):
            # Derive the record name from previously generated field values.
            active = 'active' if values['active'] else 'inactive'
            cat = 'filling' if values['__complete'] else 'corner'
            return '%s_%s_%s' % (active, cat, counter)

        category_ids = self.env.registry.populated_models['test.populate.category']
        # NOTE: factory order matters -- 'name' reads 'active'/'__complete',
        # so it must come after the factories that produce them.
        return [
            ('active', populate.cartesian([True, False], [3, 1])),
            ('state', populate.cartesian([False] + self.env['test.populate']._fields['state'].get_values(self.env))),
            ('some_ref', populate.iterate([False, 1, 2, 3, 4])),
            ('_dependant', generate_dependant),
            ('name', populate.compute(get_name)),
            ('category_id', populate.randomize([False] + category_ids)),
            ('sequence', populate.randint(1, 10))
        ]
class TestPopulateDependencyModel(models.Model):
    """Dependency target for TestPopulateModel's category_id field."""
    _name = 'test.populate.category'
    _description = 'Test Populate Category'
    _populate_sizes = {
        'small': 3,
        'medium': 10,
        'large': 20,
    }

    name = fields.Char('Name', required=True, default='Cat1')
    active = fields.Boolean('Active', default=True)

    def _populate_factories(self):
        # Mostly-active categories (9:1 weighting) across three fixed names.
        return [
            ('active', populate.cartesian([True, False], [9, 1])),
            ('name', populate.cartesian(['Cat1', 'Cat2', 'Cat3'])),
        ]
class TestNoPopulateModelInherit(models.Model):
    """Inherited model: extends the parent's factories with one more field."""
    _name = 'test.populate.inherit'
    _inherit = 'test.populate'
    _description = 'Test populate inherit'

    # NOTE(review): 'additionnal' is misspelled, but the identifier is part
    # of the stored schema and cannot be renamed without a migration.
    additionnal_field = fields.Char(required=True)

    def _populate_factories(self):
        # Reuse the parent's factories and add one for the extra field.
        return super()._populate_factories() + [
            ('additionnal_field', populate.iterate(['V1', 'V2', 'V3'])),
        ]
class TestNoPopulateModel(models.Model):
    _name = 'test.no.populate'
    _description = 'A model with no populate method and a required field, should not crash'

    # Required field with no factory: the populate machinery must tolerate it.
    name = fields.Char(required=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) 2017 Mockito contributors
* This program is made available under the terms of the MIT License.
*/
package org.mockitousage.junitrunner;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import static org.mockitoutil.JUnitResultAssert.assertThat;
import org.junit.Test;
import org.junit.runner.JUnitCore;
import org.junit.runner.Result;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.exceptions.misusing.UnfinishedStubbingException;
import org.mockito.internal.util.SimpleMockitoLogger;
import org.mockito.junit.TestableJUnitRunner;
import org.mockitousage.IMethods;
import org.mockitoutil.TestBase;
/**
 * Verifies the "stubbing hint" warnings that Mockito's JUnit runner logs when
 * a test fails while some stubbing went unused (possible argument mismatch).
 * TestableJUnitRunner routes the hints into a capturable logger so the exact
 * output can be asserted.  NOTE: the expected strings below embed the inner
 * class and method names verbatim, so those identifiers must not be renamed.
 */
public class StubbingWarningsJUnitRunnerTest extends TestBase {
    JUnitCore runner = new JUnitCore();
    // Refreshed per run so assertions only see output from the classes below.
    SimpleMockitoLogger logger = TestableJUnitRunner.refreshedLogger();

    @Test
    public void no_arg_mismatch_warnings() {
        // when: passing tests, failures with matching args, and already-used
        // stubs are all run
        runner.run(
                PassingArgMismatch.class,
                FailingWithMatchingArgs.class,
                MismatchButStubAlreadyUsed.class);

        // then: none of these scenarios should produce a hint
        assertEquals("", filterLineNo(logger.getLoggedInfo()));
    }

    @Test
    public void shows_arg_mismatch_warnings_when_test_fails() {
        // when: a failing test with an unused stub and a mismatched call
        runner.run(FailingWithArgMismatch.class);

        // then: both the unused stub and the suspect call are hinted
        assertEquals(
                "[MockitoHint] FailingWithArgMismatch.test (see javadoc for MockitoHint):\n"
                        + "[MockitoHint] 1. Unused... -> at org.mockitousage.junitrunner.StubbingWarningsJUnitRunnerTest$FailingWithArgMismatch.test(StubbingWarningsJUnitRunnerTest.java:0)\n"
                        + "[MockitoHint]  ...args ok? -> at org.mockitousage.junitrunner.StubbingWarningsJUnitRunnerTest$FailingWithArgMismatch.test(StubbingWarningsJUnitRunnerTest.java:0)\n",
                filterLineNo(logger.getLoggedInfo()));
    }

    @Test
    public void shows_arg_mismatch_warnings_only_for_mismatches() {
        // when: one stub was used, one was not
        runner.run(FailingWithSomeStubMismatches.class);

        // then: only the unused stubbing is reported
        assertEquals(
                "[MockitoHint] FailingWithSomeStubMismatches.test (see javadoc for MockitoHint):\n"
                        + "[MockitoHint] 1. Unused... -> at org.mockitousage.junitrunner.StubbingWarningsJUnitRunnerTest$FailingWithSomeStubMismatches.test(StubbingWarningsJUnitRunnerTest.java:0)\n"
                        + "[MockitoHint]  ...args ok? -> at org.mockitousage.junitrunner.StubbingWarningsJUnitRunnerTest$FailingWithSomeStubMismatches.test(StubbingWarningsJUnitRunnerTest.java:0)\n",
                filterLineNo(logger.getLoggedInfo()));
    }

    @Test
    public void validates_mockito_usage() {
        // when: a test leaves stubbing unfinished
        Result result = runner.run(InvalidMockitoUsage.class);

        // then: the runner reports the misuse as a test failure
        assertThat(result).fails(1, UnfinishedStubbingException.class);
    }

    // Passing test: unused stubbing alone must not trigger a hint.
    @RunWith(TestableJUnitRunner.class)
    public static class PassingArgMismatch {
        IMethods mock = mock(IMethods.class);

        @Test
        public void test() throws Exception {
            when(mock.simpleMethod(1)).thenReturn("1");
            mock.simpleMethod(2);
        }
    }

    @RunWith(TestableJUnitRunner.class)
    public static class FailingWithArgMismatch {
        @Mock IMethods mock;

        @Test
        public void test() throws Exception {
            when(mock.simpleMethod(1)).thenReturn("1");
            mock.simpleMethod(2);
            throw new RuntimeException("x");
        }
    }

    // Failing test whose stub WAS matched: no hint expected.
    @RunWith(TestableJUnitRunner.class)
    public static class FailingWithMatchingArgs {
        @Mock IMethods mock;

        @Test
        public void test() throws Exception {
            when(mock.simpleMethod(1)).thenReturn("1");
            mock.simpleMethod(1);
            throw new RuntimeException("x");
        }
    }

    @RunWith(TestableJUnitRunner.class)
    public static class FailingWithSomeStubMismatches {
        @Mock IMethods mock;

        @Test
        public void test() throws Exception {
            when(mock.simpleMethod(1)).thenReturn("1"); // <- used
            when(mock.simpleMethod(2)).thenReturn("2"); // <- unused

            mock.simpleMethod(1); // <- not reported
            mock.simpleMethod(3); // <- reported
            throw new RuntimeException("x");
        }
    }

    @RunWith(TestableJUnitRunner.class)
    public static class MismatchButStubAlreadyUsed {
        @Mock IMethods mock;

        @Test
        public void test() throws Exception {
            when(mock.simpleMethod(1)).thenReturn("1");
            mock.simpleMethod(1); // <-- used
            mock.simpleMethod(2); // <-- arg mismatch, but the stub was already used
            throw new RuntimeException("x");
        }
    }

    @RunWith(TestableJUnitRunner.class)
    public static class InvalidMockitoUsage {
        @Mock IMethods mock;

        @SuppressWarnings({"MockitoUsage", "CheckReturnValue"})
        @Test
        public void test() throws Exception {
            when(mock.simpleMethod()); // <-- unfinished stubbing
        }
    }
}
|
java
|
github
|
https://github.com/mockito/mockito
|
mockito-core/src/test/java/org/mockitousage/junitrunner/StubbingWarningsJUnitRunnerTest.java
|
#!/usr/bin/env bash
set -e

# Run from the api directory regardless of the caller's cwd.
# Expected to be in api directory
cd "$(dirname "${BASH_SOURCE[0]}")/.."

echo "Validating swagger.yaml..."
yamllint -f parsable -c validate/yamllint.yaml swagger.yaml

# Capture swagger's output; on failure forward it to stderr and exit non-zero.
out=$(swagger validate swagger.yaml) || {
	echo "${out}" >&2
	exit 1
}
echo "Validation done! ${out}"
|
unknown
|
github
|
https://github.com/moby/moby
|
api/scripts/validate-swagger.sh
|
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by validation-gen. DO NOT EDIT.
package v1alpha1
import (
context "context"
fmt "fmt"
nodev1alpha1 "k8s.io/api/node/v1alpha1"
equality "k8s.io/apimachinery/pkg/api/equality"
operation "k8s.io/apimachinery/pkg/api/operation"
safe "k8s.io/apimachinery/pkg/api/safe"
validate "k8s.io/apimachinery/pkg/api/validate"
runtime "k8s.io/apimachinery/pkg/runtime"
field "k8s.io/apimachinery/pkg/util/validation/field"
)
// Hook this package's validations into the scheme builder so that
// RegisterValidations runs when the scheme is assembled.
func init() { localSchemeBuilder.Register(RegisterValidations) }

// RegisterValidations adds validation functions to the given scheme.
// Public to allow building arbitrary schemes.
//
// NOTE: this file is generated by validation-gen (DO NOT EDIT); change the
// generator's inputs rather than this code.
func RegisterValidations(scheme *runtime.Scheme) error {
	// type RuntimeClass
	scheme.AddValidationFunc((*nodev1alpha1.RuntimeClass)(nil), func(ctx context.Context, op operation.Operation, obj, oldObj interface{}) field.ErrorList {
		switch op.Request.SubresourcePath() {
		case "/":
			// Root resource: run the full RuntimeClass validation.
			return Validate_RuntimeClass(ctx, op, nil /* fldPath */, obj.(*nodev1alpha1.RuntimeClass), safe.Cast[*nodev1alpha1.RuntimeClass](oldObj))
		}
		// Unknown subresource: surface an internal error rather than
		// silently skipping validation.
		return field.ErrorList{field.InternalError(nil, fmt.Errorf("no validation found for %T, subresource: %v", obj, op.Request.SubresourcePath()))}
	})
	return nil
}
// Validate_RuntimeClass validates an instance of RuntimeClass according
// to declarative validation rules in the API schema.
// (Generated by validation-gen; DO NOT EDIT by hand.)
func Validate_RuntimeClass(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *nodev1alpha1.RuntimeClass) (errs field.ErrorList) {
	// field nodev1alpha1.RuntimeClass.TypeMeta has no validation
	// field nodev1alpha1.RuntimeClass.ObjectMeta has no validation

	// field nodev1alpha1.RuntimeClass.Spec
	errs = append(errs,
		func(fldPath *field.Path, obj, oldObj *nodev1alpha1.RuntimeClassSpec, oldValueCorrelated bool) (errs field.ErrorList) {
			// don't revalidate unchanged data
			if oldValueCorrelated && op.Type == operation.Update && equality.Semantic.DeepEqual(obj, oldObj) {
				return nil
			}
			// call the type's validation function
			errs = append(errs, Validate_RuntimeClassSpec(ctx, op, fldPath, obj, oldObj)...)
			return
		}(fldPath.Child("spec"), &obj.Spec, safe.Field(oldObj, func(oldObj *nodev1alpha1.RuntimeClass) *nodev1alpha1.RuntimeClassSpec { return &oldObj.Spec }), oldObj != nil)...)

	return errs
}
// Validate_RuntimeClassSpec validates an instance of RuntimeClassSpec according
// to declarative validation rules in the API schema.
// (Generated by validation-gen; DO NOT EDIT by hand.)
func Validate_RuntimeClassSpec(ctx context.Context, op operation.Operation, fldPath *field.Path, obj, oldObj *nodev1alpha1.RuntimeClassSpec) (errs field.ErrorList) {
	// field nodev1alpha1.RuntimeClassSpec.RuntimeHandler
	errs = append(errs,
		func(fldPath *field.Path, obj, oldObj *string, oldValueCorrelated bool) (errs field.ErrorList) {
			// don't revalidate unchanged data
			if oldValueCorrelated && op.Type == operation.Update && (obj == oldObj || (obj != nil && oldObj != nil && *obj == *oldObj)) {
				return nil
			}
			// call field-attached validations
			// Immutable/Required failures short-circuit the remaining checks.
			earlyReturn := false
			if e := validate.Immutable(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
				errs = append(errs, e...)
				earlyReturn = true
			}
			if e := validate.RequiredValue(ctx, op, fldPath, obj, oldObj); len(e) != 0 {
				errs = append(errs, e...)
				earlyReturn = true
			}
			if earlyReturn {
				return // do not proceed
			}
			errs = append(errs, validate.ShortName(ctx, op, fldPath, obj, oldObj)...)
			return
		}(fldPath.Child("runtimeHandler"), &obj.RuntimeHandler, safe.Field(oldObj, func(oldObj *nodev1alpha1.RuntimeClassSpec) *string { return &oldObj.RuntimeHandler }), oldObj != nil)...)

	// field nodev1alpha1.RuntimeClassSpec.Overhead has no validation
	// field nodev1alpha1.RuntimeClassSpec.Scheduling has no validation
	return errs
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/node/v1alpha1/zz_generated.validations.go
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
# Root of the processing plugin: three directory levels up from this module
# (os.path.split(p)[0] is equivalent to os.path.dirname(p)).
pluginPath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
class sieve(GdalAlgorithm):
    """Processing wrapper around the ``gdal_sieve`` utility, which removes
    raster polygons smaller than a given threshold size."""

    INPUT = 'INPUT'
    THRESHOLD = 'THRESHOLD'
    EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
    NO_MASK = 'NO_MASK'
    MASK_LAYER = 'MASK_LAYER'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the algorithm's inputs and output, in display order."""
        basic_params = [
            QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')),
            QgsProcessingParameterNumber(self.THRESHOLD,
                                         self.tr('Threshold'),
                                         type=QgsProcessingParameterNumber.Integer,
                                         minValue=0,
                                         defaultValue=10),
            QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
                                          self.tr('Use 8-connectedness'),
                                          defaultValue=False),
            QgsProcessingParameterBoolean(self.NO_MASK,
                                          self.tr('Do not use the default validity mask for the input band'),
                                          defaultValue=False),
            QgsProcessingParameterRasterLayer(self.MASK_LAYER,
                                              self.tr('Validity mask'),
                                              optional=True),
        ]
        for param in basic_params:
            self.addParameter(param)

        # Free-form extra arguments, hidden behind the "advanced" flag.
        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)

        self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Sieved')))

    def name(self):
        return 'sieve'

    def displayName(self):
        return self.tr('Sieve')

    def group(self):
        return self.tr('Raster analysis')

    def groupId(self):
        return 'rasteranalysis'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'sieve.png'))

    def commandName(self):
        return 'gdal_sieve'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Build the gdal_sieve command line for the given parameter values."""
        threshold = self.parameterAsInt(parameters, self.THRESHOLD, context)
        cmd_args = ['-st', str(threshold)]

        eight_connected = self.parameterAsBoolean(
            parameters, self.EIGHT_CONNECTEDNESS, context)
        cmd_args.append('-8' if eight_connected else '-4')

        if self.parameterAsBoolean(parameters, self.NO_MASK, context):
            cmd_args.append('-nomask')

        mask_layer = self.parameterAsRasterLayer(parameters, self.MASK_LAYER, context)
        if mask_layer:
            cmd_args.extend(['-mask', mask_layer.source()])

        out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        self.setOutputValue(self.OUTPUT, out)
        cmd_args.extend(
            ['-of', QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])])

        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            cmd_args.append(self.parameterAsString(parameters, self.EXTRA, context))

        source_layer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if source_layer is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
        cmd_args.append(source_layer.source())
        cmd_args.append(out)

        executable = self.commandName() + ('.bat' if isWindows() else '.py')
        return [executable, GdalUtils.escapeAndJoin(cmd_args)]
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.internals.Topic;
import org.apache.kafka.common.message.MetadataResponseData;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBrokerCollection;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopicCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestTestUtils;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.MockTime;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.test.MockClusterResourceListener;
import org.junit.jupiter.api.Test;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import static org.apache.kafka.test.TestUtils.assertOptional;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class MetadataTest {
// Default backoff and expiry settings shared by the tests in this class.
private final long refreshBackoffMs = 100;
private final long refreshBackoffMaxMs = 1000;
private final long metadataExpireMs = 1000;
// Instance under test; JUnit 5 creates a fresh test-class instance per test method,
// so this field is re-initialized for every test.
private Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs,
metadataExpireMs, new LogContext(), new ClusterResourceListeners());
/**
 * Builds a MetadataResponse with no brokers, no cluster id, no controller
 * and no topics — used to drive updates that add no cluster state.
 */
private static MetadataResponse emptyMetadataResponse() {
    return RequestTestUtils.metadataResponse(Collections.emptyList(), null, -1, Collections.emptyList());
}
/** Once closed, the metadata object must reject any further update attempt. */
@Test
public void testMetadataUpdateAfterClose() {
    metadata.close();
    MetadataResponse response = emptyMetadataResponse();
    assertThrows(IllegalStateException.class,
        () -> metadata.updateWithCurrentRequestVersion(response, false, 1000));
}
/**
 * Verifies timeToNextUpdate() transitions for a single (backoff, expiry) pair:
 * immediately updatable when new, then gated by max(backoff, expiry) after a
 * successful update, gated only by backoff after an explicit request, and
 * updatable again once both intervals have elapsed.
 */
private static void checkTimeToNextUpdate(long refreshBackoffMs, long metadataExpireMs) {
long now = 10000;
// Metadata timeToNextUpdate is implicitly relying on the premise that the currentTimeMillis is always
// larger than the metadataExpireMs or refreshBackoffMs.
// It won't be a problem practically since all usages of Metadata calls first update() immediately after
// its construction.
if (metadataExpireMs > now || refreshBackoffMs > now) {
throw new IllegalArgumentException(
"metadataExpireMs and refreshBackoffMs must be smaller than 'now'");
}
long largerOfBackoffAndExpire = Math.max(refreshBackoffMs, metadataExpireMs);
// This test intentionally disabled exponential backoff, which results in constant backoff delays
Metadata metadata = new Metadata(refreshBackoffMs, refreshBackoffMs,
metadataExpireMs, new LogContext(), new ClusterResourceListeners());
assertEquals(0, metadata.timeToNextUpdate(now));
// lastSuccessfulRefreshMs updated to now.
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, now);
// The last update was successful so the remaining time to expire the current metadata should be returned.
assertEquals(largerOfBackoffAndExpire, metadata.timeToNextUpdate(now));
// Metadata update requested explicitly
metadata.requestUpdate(true);
// Update requested so metadataExpireMs should no longer take effect.
assertEquals(refreshBackoffMs, metadata.timeToNextUpdate(now));
// Reset needUpdate to false.
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, now);
assertEquals(largerOfBackoffAndExpire, metadata.timeToNextUpdate(now));
// Both metadataExpireMs and refreshBackoffMs elapsed.
now += largerOfBackoffAndExpire;
assertEquals(0, metadata.timeToNextUpdate(now));
assertEquals(0, metadata.timeToNextUpdate(now + 1));
}
/** Right after bootstrap, an update must be allowed with no backoff at all. */
@Test
public void testUpdateMetadataAllowedImmediatelyAfterBootstrap() {
    MockTime time = new MockTime();
    Metadata freshMetadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs,
        metadataExpireMs, new LogContext(), new ClusterResourceListeners());
    freshMetadata.bootstrap(Collections.singletonList(new InetSocketAddress("localhost", 9002)));
    assertEquals(0, freshMetadata.timeToAllowUpdate(time.milliseconds()));
    assertEquals(0, freshMetadata.timeToNextUpdate(time.milliseconds()));
}
/** Exercises checkTimeToNextUpdate across backoff/expiry orderings, including zeros. */
@Test
public void testTimeToNextUpdate() {
    long[][] cases = {{100, 1000}, {1000, 100}, {0, 0}, {0, 100}, {100, 0}};
    for (long[] c : cases) {
        checkTimeToNextUpdate(c[0], c[1]);
    }
}
/**
 * After a failed update, timeToNextUpdate() must honor the (jittered) retry
 * backoff — even when an update has been explicitly requested — and return 0
 * once the backoff window has fully elapsed.
 */
@Test
public void testTimeToNextUpdateRetryBackoff() {
long now = 10000;
// lastRefreshMs updated to now.
metadata.failedUpdate(now);
// Backing off. Remaining time until next try should be returned.
long lowerBoundBackoffMs = (long) (refreshBackoffMs * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER));
long upperBoundBackoffMs = (long) (refreshBackoffMs * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
assertEquals(refreshBackoffMs, metadata.timeToNextUpdate(now), upperBoundBackoffMs - lowerBoundBackoffMs);
// Even though metadata update requested explicitly, still respects backoff.
metadata.requestUpdate(true);
assertEquals(refreshBackoffMs, metadata.timeToNextUpdate(now), upperBoundBackoffMs - lowerBoundBackoffMs);
// refreshBackoffMs elapsed.
now += refreshBackoffMs + upperBoundBackoffMs;
// It should return 0 to let next try.
assertEquals(0, metadata.timeToNextUpdate(now));
assertEquals(0, metadata.timeToNextUpdate(now + 1));
}
/**
 * Prior to Kafka version 2.4 (which coincides with Metadata version 9), the broker does not propagate leader epoch
 * information accurately while a reassignment is in progress, so we cannot rely on it. This is explained in more
 * detail in MetadataResponse's constructor.
 */
@Test
public void testIgnoreLeaderEpochInOlderMetadataResponse() {
TopicPartition tp = new TopicPartition("topic", 0);
// Single partition with leader epoch 10; whether that epoch survives parsing
// depends on the response version (see the two loops below).
MetadataResponsePartition partitionMetadata = new MetadataResponsePartition()
.setPartitionIndex(tp.partition())
.setLeaderId(5)
.setLeaderEpoch(10)
.setReplicaNodes(Arrays.asList(1, 2, 3))
.setIsrNodes(Arrays.asList(1, 2, 3))
.setOfflineReplicas(Collections.emptyList())
.setErrorCode(Errors.NONE.code());
MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
.setName(tp.topic())
.setErrorCode(Errors.NONE.code())
.setPartitions(Collections.singletonList(partitionMetadata))
.setIsInternal(false);
MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
topics.add(topicMetadata);
MetadataResponseData data = new MetadataResponseData()
.setClusterId("clusterId")
.setControllerId(0)
.setTopics(topics)
.setBrokers(new MetadataResponseBrokerCollection());
// Versions < 9: leader epochs are unreliable and must be dropped on parse.
for (short version = ApiKeys.METADATA.oldestVersion(); version < 9; version++) {
Readable readable = MessageUtil.toByteBufferAccessor(data, version);
MetadataResponse response = MetadataResponse.parse(readable, version);
assertFalse(response.hasReliableLeaderEpochs());
metadata.updateWithCurrentRequestVersion(response, false, 100);
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
MetadataResponse.PartitionMetadata responseMetadata = this.metadata.partitionMetadataIfCurrent(tp).get();
assertEquals(Optional.empty(), responseMetadata.leaderEpoch);
}
// Versions >= 9: epochs are reliable and must be preserved.
for (short version = 9; version <= ApiKeys.METADATA.latestVersion(); version++) {
Readable readable = MessageUtil.toByteBufferAccessor(data, version);
MetadataResponse response = MetadataResponse.parse(readable, version);
assertTrue(response.hasReliableLeaderEpochs());
metadata.updateWithCurrentRequestVersion(response, false, 100);
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
MetadataResponse.PartitionMetadata responseMetadata = metadata.partitionMetadataIfCurrent(tp).get();
assertEquals(Optional.of(10), responseMetadata.leaderEpoch);
}
}
/**
 * A metadata response carrying an older leader epoch than the one already
 * seen must be ignored entirely: the cached partition state (including ISR)
 * stays at the newer-epoch values.
 */
@Test
public void testStaleMetadata() {
TopicPartition tp = new TopicPartition("topic", 0);
MetadataResponsePartition partitionMetadata = new MetadataResponsePartition()
.setPartitionIndex(tp.partition())
.setLeaderId(1)
.setLeaderEpoch(10)
.setReplicaNodes(Arrays.asList(1, 2, 3))
.setIsrNodes(Arrays.asList(1, 2, 3))
.setOfflineReplicas(Collections.emptyList())
.setErrorCode(Errors.NONE.code());
MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
.setName(tp.topic())
.setErrorCode(Errors.NONE.code())
.setPartitions(Collections.singletonList(partitionMetadata))
.setIsInternal(false);
MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
topics.add(topicMetadata);
MetadataResponseData data = new MetadataResponseData()
.setClusterId("clusterId")
.setControllerId(0)
.setTopics(topics)
.setBrokers(new MetadataResponseBrokerCollection());
metadata.updateWithCurrentRequestVersion(new MetadataResponse(data, ApiKeys.METADATA.latestVersion()), false, 100);
// Older epoch with changed ISR should be ignored
partitionMetadata
.setPartitionIndex(tp.partition())
.setLeaderId(1)
.setLeaderEpoch(9)
.setReplicaNodes(Arrays.asList(1, 2, 3))
.setIsrNodes(Arrays.asList(1, 2))
.setOfflineReplicas(Collections.emptyList())
.setErrorCode(Errors.NONE.code());
metadata.updateWithCurrentRequestVersion(new MetadataResponse(data, ApiKeys.METADATA.latestVersion()), false, 101);
// The stale (epoch 9) update must not have replaced the epoch-10 state.
assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
MetadataResponse.PartitionMetadata responseMetadata = this.metadata.partitionMetadataIfCurrent(tp).get();
assertEquals(Arrays.asList(1, 2, 3), responseMetadata.inSyncReplicaIds);
assertEquals(Optional.of(10), responseMetadata.leaderEpoch);
}
/**
 * A failed update restarts the (jittered) backoff clock but must not change
 * the timestamp of the last *successful* update.
 */
@Test
public void testFailedUpdate() {
long time = 100;
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, time);
assertEquals(100, metadata.timeToNextUpdate(1000));
metadata.failedUpdate(1100);
long lowerBoundBackoffMs = (long) (refreshBackoffMs * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER));
long upperBoundBackoffMs = (long) (refreshBackoffMs * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER));
assertEquals(100, metadata.timeToNextUpdate(1100), upperBoundBackoffMs - lowerBoundBackoffMs);
// failedUpdate must not advance the last-successful-update timestamp.
assertEquals(100, metadata.lastSuccessfulUpdate());
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, time);
assertEquals(100, metadata.timeToNextUpdate(1000), upperBoundBackoffMs - lowerBoundBackoffMs);
}
/**
 * ClusterResourceListeners are only notified for real (non-bootstrap)
 * metadata updates, and receive the cluster id from the response.
 */
@Test
public void testClusterListenerGetsNotifiedOfUpdate() {
MockClusterResourceListener mockClusterListener = new MockClusterResourceListener();
ClusterResourceListeners listeners = new ClusterResourceListeners();
listeners.maybeAdd(mockClusterListener);
metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs,
metadataExpireMs, new LogContext(), listeners);
String hostName = "www.example.com";
// Bootstrap alone must NOT trigger the listener.
metadata.bootstrap(Collections.singletonList(new InetSocketAddress(hostName, 9002)));
assertFalse(MockClusterResourceListener.IS_ON_UPDATE_CALLED.get(),
"ClusterResourceListener should not called when metadata is updated with bootstrap Cluster");
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put("topic", 1);
partitionCounts.put("topic1", 1);
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, partitionCounts);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 100);
assertEquals("dummy", mockClusterListener.clusterResource().clusterId(),
"MockClusterResourceListener did not get cluster metadata correctly");
assertTrue(MockClusterResourceListener.IS_ON_UPDATE_CALLED.get(),
"MockClusterResourceListener should be called when metadata is updated with non-bootstrap Cluster");
}
/**
 * updateLastSeenEpochIfNewer() must request a metadata update exactly when
 * the observed epoch is newer than the one cached; the epochs/updateResult
 * arrays enumerate the expected outcome for each observation in sequence.
 */
@Test
public void testRequestUpdate() {
assertFalse(metadata.updateRequested());
int[] epochs = {42, 42, 41, 41, 42, 43, 43, 42, 41, 44};
boolean[] updateResult = {true, false, false, false, false, true, false, false, false, true};
TopicPartition tp = new TopicPartition("topic", 0);
// Seed the cache with epoch 0 so every first observation (42) counts as newer.
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1,
Collections.emptyMap(), Collections.singletonMap("topic", 1), _tp -> 0);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
for (int i = 0; i < epochs.length; i++) {
metadata.updateLastSeenEpochIfNewer(tp, epochs[i]);
if (updateResult[i]) {
assertTrue(metadata.updateRequested(), "Expected metadata update to be requested [" + i + "]");
} else {
assertFalse(metadata.updateRequested(), "Did not expect metadata update to be requested [" + i + "]");
}
// Applying any response clears the pending-update flag before the next step.
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
assertFalse(metadata.updateRequested());
}
}
/**
 * Last-seen leader epoch bookkeeping: no-op while no epoch exists, then
 * monotonically non-decreasing — older or equal epochs (from either direct
 * observations or metadata responses) never overwrite a newer one.
 */
@Test
public void testUpdateLastEpoch() {
TopicPartition tp = new TopicPartition("topic-1", 0);
MetadataResponse metadataResponse = emptyMetadataResponse();
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
// if we have no leader epoch, this call shouldn't do anything
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 0));
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 1));
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 2));
assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent());
// Metadata with newer epoch is handled
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue()));
// Don't update to an older one
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 1));
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue()));
// Don't cause update if it's the same one
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 10));
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue()));
// Update if we see newer epoch
assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 12));
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue()));
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 12);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L);
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue()));
// Don't overwrite metadata with older epoch
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 11);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L);
assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue()));
}
/**
 * Recreating a topic under a new topic ID resets epoch monotonicity: the
 * epoch may go backwards when the topic ID changes, while an error response
 * for the deleted topic leaves the old epoch untouched.
 */
@Test
public void testEpochUpdateAfterTopicDeletion() {
TopicPartition tp = new TopicPartition("topic-1", 0);
MetadataResponse metadataResponse = emptyMetadataResponse();
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
// Start with a Topic topic-1 with a random topic ID
Map<String, Uuid> topicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
// Topic topic-1 is now deleted so Response contains an Error. LeaderEpoch should still maintain Old value
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.singletonMap("topic-1", Errors.UNKNOWN_TOPIC_OR_PARTITION), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
// Create topic-1 again but this time with a different topic ID. LeaderEpoch should be updated to new even if lower.
Map<String, Uuid> newTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 5, newTopicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
assertEquals(Optional.of(5), metadata.lastSeenLeaderEpoch(tp));
}
/**
 * Epoch acceptance rules across topic-ID transitions: null -> id adopts the
 * new epoch; same id keeps monotonicity; a changed id accepts the new epoch
 * even when it is lower than the last seen one.
 */
@Test
public void testEpochUpdateOnChangedTopicIds() {
TopicPartition tp = new TopicPartition("topic-1", 0);
Map<String, Uuid> topicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
MetadataResponse metadataResponse = emptyMetadataResponse();
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
// Start with a topic with no topic ID
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 100);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
assertEquals(Optional.of(100), metadata.lastSeenLeaderEpoch(tp));
// If the older topic ID is null, we should go with the new topic ID as the leader epoch
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L);
assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
// Don't cause update if it's the same one
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L);
assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
// Update if we see newer epoch
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 12, topicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 4L);
assertEquals(Optional.of(12), metadata.lastSeenLeaderEpoch(tp));
// We should also update if we see a new topicId even if the epoch is lower
Map<String, Uuid> newTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 3, newTopicIds);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 5L);
assertEquals(Optional.of(3), metadata.lastSeenLeaderEpoch(tp));
// Finally, update when the topic ID is new and the epoch is higher
Map<String, Uuid> newTopicIds2 = Collections.singletonMap("topic-1", Uuid.randomUuid());
metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 20, newTopicIds2);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 6L);
assertEquals(Optional.of(20), metadata.lastSeenLeaderEpoch(tp));
}
/**
 * Metadata responses carrying an epoch older than the last seen one are
 * rejected wholesale (ISR changes included); equal epochs are accepted; and
 * the last-seen epoch survives even when the partition itself disappears
 * from a response.
 */
@Test
public void testRejectOldMetadata() {
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put("topic-1", 1);
TopicPartition tp = new TopicPartition("topic-1", 0);
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
// First epoch seen, accept it
{
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 100);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
assertNotNull(metadata.fetch().partition(tp));
assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent());
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
// Fake an empty ISR, but with an older epoch, should reject it
{
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 99,
(error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
new MetadataResponse.PartitionMetadata(error, partition, leader,
leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L);
// ISR still has the single replica from the epoch-100 state.
assertEquals(1, metadata.fetch().partition(tp).inSyncReplicas().length);
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
// Fake an empty ISR, with same epoch, accept it
{
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 100,
(error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
new MetadataResponse.PartitionMetadata(error, partition, leader,
leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L);
assertEquals(0, metadata.fetch().partition(tp).inSyncReplicas().length);
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
// Empty metadata response, should not keep old partition but should keep the last-seen epoch
{
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L);
assertNull(metadata.fetch().partition(tp));
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
// Back in the metadata, with old epoch, should not get added
{
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 99);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
assertNull(metadata.fetch().partition(tp));
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
}
/**
 * A newer epoch learned out-of-band (e.g. from a fetch or list-offsets
 * response) makes the cached partition metadata stale: the cached entry
 * remains fetchable but is not "current" until a response with an equal or
 * newer epoch arrives.
 */
@Test
public void testOutOfBandEpochUpdate() {
Map<String, Integer> partitionCounts = new HashMap<>();
partitionCounts.put("topic-1", 5);
TopicPartition tp = new TopicPartition("topic-1", 0);
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 99));
// Update epoch to 100
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 100);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
assertNotNull(metadata.fetch().partition(tp));
assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent());
assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue());
// Simulate a leader epoch from another response, like a fetch response or list offsets
assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 101));
// Cache of partition stays, but current partition info is not available since it's stale
assertNotNull(metadata.fetch().partition(tp));
assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue());
assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent());
assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue());
// Metadata with older epoch is rejected, metadata state is unchanged
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L);
assertNotNull(metadata.fetch().partition(tp));
assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue());
assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent());
assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue());
// Metadata with equal or newer epoch is accepted
metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 101);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 30L);
assertNotNull(metadata.fetch().partition(tp));
assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue());
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue());
}
/**
 * A response without leader epochs must still populate usable partition
 * metadata, and updateLastSeenEpochIfNewer() stays a no-op while the cached
 * epoch is absent.
 */
@Test
public void testNoEpoch() {
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1));
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
TopicPartition tp = new TopicPartition("topic-1", 0);
// no epoch
assertFalse(metadata.lastSeenLeaderEpoch(tp).isPresent());
// still works
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
assertEquals(0, metadata.partitionMetadataIfCurrent(tp).get().partition());
assertEquals(Optional.of(0), metadata.partitionMetadataIfCurrent(tp).get().leaderId);
// Since epoch was null, this shouldn't update it
metadata.updateLastSeenEpochIfNewer(tp, 10);
assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent());
assertFalse(metadata.partitionMetadataIfCurrent(tp).get().leaderEpoch.isPresent());
}
/**
 * The Cluster view built from metadata must carry topic categorization
 * (invalid / unauthorized / internal), partition counts, and the bootstrap
 * and empty sentinel instances must match Cluster's own factory methods.
 */
@Test
public void testClusterCopy() {
Map<String, Integer> counts = new HashMap<>();
Map<String, Errors> errors = new HashMap<>();
counts.put("topic1", 2);
counts.put("topic2", 3);
counts.put(Topic.GROUP_METADATA_TOPIC_NAME, 3);
errors.put("topic3", Errors.INVALID_TOPIC_EXCEPTION);
errors.put("topic4", Errors.TOPIC_AUTHORIZATION_FAILED);
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 4, errors, counts);
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
Cluster cluster = metadata.fetch();
assertEquals("dummy", cluster.clusterResource().clusterId());
assertEquals(4, cluster.nodes().size());
// topic counts
assertEquals(cluster.invalidTopics(), Collections.singleton("topic3"));
assertEquals(cluster.unauthorizedTopics(), Collections.singleton("topic4"));
assertEquals(3, cluster.topics().size());
assertEquals(cluster.internalTopics(), Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME));
// partition counts
assertEquals(2, cluster.partitionsForTopic("topic1").size());
assertEquals(3, cluster.partitionsForTopic("topic2").size());
// Sentinel instances
InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 0);
Cluster fromMetadata = MetadataSnapshot.bootstrap(Collections.singletonList(address)).cluster();
Cluster fromCluster = Cluster.bootstrap(Collections.singletonList(address));
assertEquals(fromMetadata, fromCluster);
Cluster fromMetadataEmpty = MetadataSnapshot.empty().cluster();
Cluster fromClusterEmpty = Cluster.empty();
assertEquals(fromMetadataEmpty, fromClusterEmpty);
}
/**
 * Request-version bookkeeping: a response for an in-flight request taken
 * before a version bump does not clear the pending-update flag; only a
 * response at (or after) the bumped version resolves it.
 */
@Test
public void testRequestVersion() {
Time time = new MockTime();
metadata.requestUpdate(true);
Metadata.MetadataRequestAndVersion versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, time.milliseconds());
assertFalse(metadata.updateRequested());
// bump the request version for new topics added to the metadata
metadata.requestUpdateForNewTopics();
// simulating a bump while a metadata request is in flight
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
metadata.requestUpdateForNewTopics();
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, time.milliseconds());
// metadata update is still needed
assertTrue(metadata.updateRequested());
// the next update will resolve it
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, time.milliseconds());
assertFalse(metadata.updateRequested());
}
/**
 * Partial vs full metadata updates: new-topic requests produce partial
 * updates, any full request (explicit or via expiry) forces a full update,
 * and overlapping partial updates are only resolved once all of them have
 * been answered.
 */
@Test
public void testPartialMetadataUpdate() {
Time time = new MockTime();
// Subclass so the "new topics" builder is observable as an ordinary builder.
metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
@Override
protected MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
return newMetadataRequestBuilder();
}
};
assertFalse(metadata.updateRequested());
// Request a metadata update. This must force a full metadata update request.
metadata.requestUpdate(true);
Metadata.MetadataRequestAndVersion versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request a metadata update for a new topic. This should perform a partial metadata update.
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request both types of metadata updates. This should always perform a full update.
metadata.requestUpdate(true);
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request only a partial metadata update, but elapse enough time such that a full refresh is needed.
metadata.requestUpdateForNewTopics();
final long refreshTimeMs = time.milliseconds() + metadata.metadataExpireMs();
versionAndBuilder = metadata.newMetadataRequestAndVersion(refreshTimeMs);
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, refreshTimeMs);
assertFalse(metadata.updateRequested());
// Request two partial metadata updates that are overlapping.
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(versionAndBuilder.isPartialUpdate);
metadata.requestUpdateForNewTopics();
Metadata.MetadataRequestAndVersion overlappingVersionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(overlappingVersionAndBuilder.isPartialUpdate);
assertTrue(metadata.updateRequested());
// Answering only the first overlapping request leaves an update pending.
metadata.update(versionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-1", 1)), true, time.milliseconds());
assertTrue(metadata.updateRequested());
metadata.update(overlappingVersionAndBuilder.requestVersion,
RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-2", 1)), true, time.milliseconds());
assertFalse(metadata.updateRequested());
}
    /**
     * An INVALID_TOPIC_EXCEPTION error in a metadata response must surface as an
     * {@link InvalidTopicException} exactly once, and must be cleared by a later
     * good response even if it was never raised to the user.
     */
    @Test
    public void testInvalidTopicError() {
        Time time = new MockTime();
        String invalidTopic = "topic dfsa";
        MetadataResponse invalidTopicResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1,
            Collections.singletonMap(invalidTopic, Errors.INVALID_TOPIC_EXCEPTION), Collections.emptyMap());
        metadata.updateWithCurrentRequestVersion(invalidTopicResponse, false, time.milliseconds());
        InvalidTopicException e = assertThrows(InvalidTopicException.class, () -> metadata.maybeThrowAnyException());
        assertEquals(Collections.singleton(invalidTopic), e.invalidTopics());
        // We clear the exception once it has been raised to the user
        metadata.maybeThrowAnyException();
        // Reset the invalid topic error
        metadata.updateWithCurrentRequestVersion(invalidTopicResponse, false, time.milliseconds());
        // If we get a good update, the error should clear even if we haven't had a chance to raise it to the user
        metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, time.milliseconds());
        metadata.maybeThrowAnyException();
    }
@Test
public void testTopicAuthorizationError() {
Time time = new MockTime();
String invalidTopic = "foo";
MetadataResponse unauthorizedTopicResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1,
Collections.singletonMap(invalidTopic, Errors.TOPIC_AUTHORIZATION_FAILED), Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(unauthorizedTopicResponse, false, time.milliseconds());
TopicAuthorizationException e = assertThrows(TopicAuthorizationException.class, () -> metadata.maybeThrowAnyException());
assertEquals(Collections.singleton(invalidTopic), e.unauthorizedTopics());
// We clear the exception once it has been raised to the user
metadata.maybeThrowAnyException();
// Reset the unauthorized topic error
metadata.updateWithCurrentRequestVersion(unauthorizedTopicResponse, false, time.milliseconds());
// If we get a good update, the error should clear even if we haven't had a chance to raise it to the user
metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, time.milliseconds());
metadata.maybeThrowAnyException();
}
    /**
     * maybeThrowExceptionForTopic() must raise only the error recorded for the
     * named topic (authorization vs. invalid-topic), must clear it once raised,
     * and asking about an unaffected topic must clear pending errors silently.
     */
    @Test
    public void testMetadataTopicErrors() {
        Time time = new MockTime();
        Map<String, Errors> topicErrors = new HashMap<>(3);
        topicErrors.put("invalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
        topicErrors.put("sensitiveTopic1", Errors.TOPIC_AUTHORIZATION_FAILED);
        topicErrors.put("sensitiveTopic2", Errors.TOPIC_AUTHORIZATION_FAILED);
        MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1, topicErrors, Collections.emptyMap());
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
        TopicAuthorizationException e1 = assertThrows(TopicAuthorizationException.class,
            () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic1"));
        // Only the requested topic appears in the exception, not the other failing topics.
        assertEquals(Collections.singleton("sensitiveTopic1"), e1.unauthorizedTopics());
        // We clear the exception once it has been raised to the user
        metadata.maybeThrowAnyException();
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
        TopicAuthorizationException e2 = assertThrows(TopicAuthorizationException.class,
            () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic2"));
        assertEquals(Collections.singleton("sensitiveTopic2"), e2.unauthorizedTopics());
        metadata.maybeThrowAnyException();
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
        InvalidTopicException e3 = assertThrows(InvalidTopicException.class,
            () -> metadata.maybeThrowExceptionForTopic("invalidTopic"));
        assertEquals(Collections.singleton("invalidTopic"), e3.invalidTopics());
        metadata.maybeThrowAnyException();
        // Other topics should not throw exception, but they should clear existing exception
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, time.milliseconds());
        metadata.maybeThrowExceptionForTopic("anotherTopic");
        metadata.maybeThrowAnyException();
    }
    /**
     * nodeIfOnline() must return empty for a replica listed as offline
     * (node 1 here), while nodeById() still resolves that broker normally.
     */
    @Test
    public void testNodeIfOffline() {
        Map<String, Integer> partitionCounts = new HashMap<>();
        partitionCounts.put("topic-1", 1);
        Node node0 = new Node(0, "localhost", 9092);
        Node node1 = new Node(1, "localhost", 9093);
        // Partition leader is node0; node1 is reported in the offline-replicas list.
        MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 2, Collections.emptyMap(), partitionCounts, _tp -> 99,
            (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
                new MetadataResponse.PartitionMetadata(error, partition, Optional.of(node0.id()), leaderEpoch,
                    Collections.singletonList(node0.id()), Collections.emptyList(),
                    Collections.singletonList(node1.id())), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
        metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
        TopicPartition tp = new TopicPartition("topic-1", 0);
        assertOptional(metadata.fetch().nodeIfOnline(tp, 0), node -> assertEquals(0, node.id()));
        // Offline replica: present in cluster metadata but not returned as "online".
        assertFalse(metadata.fetch().nodeIfOnline(tp, 1).isPresent());
        assertEquals(0, metadata.fetch().nodeById(0).id());
        assertEquals(1, metadata.fetch().nodeById(1).id());
    }
    /**
     * nodeIfOnline() must return empty for a broker that is alive in the cluster
     * but not part of the partition's replica set (node 1 is never a replica here).
     */
    @Test
    public void testNodeIfOnlineWhenNotInReplicaSet() {
        Map<String, Integer> partitionCounts = new HashMap<>();
        partitionCounts.put("topic-1", 1);
        Node node0 = new Node(0, "localhost", 9092);
        // The partition's only replica (and leader) is node0; no replica is offline.
        MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 2, Collections.emptyMap(), partitionCounts, _tp -> 99,
            (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
                new MetadataResponse.PartitionMetadata(error, partition, Optional.of(node0.id()), leaderEpoch,
                    Collections.singletonList(node0.id()), Collections.emptyList(),
                    Collections.emptyList()), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
        metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
        metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
        TopicPartition tp = new TopicPartition("topic-1", 0);
        // Node 1 exists in the cluster but is not a replica of tp, so it is not "online" for tp.
        assertEquals(1, metadata.fetch().nodeById(1).id());
        assertFalse(metadata.fetch().nodeIfOnline(tp, 1).isPresent());
    }
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
TopicPartition tp = new TopicPartition("topic-1", 0);
assertEquals(0, metadata.fetch().nodeById(0).id());
assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
    /**
     * When a stale response (lower leader epoch) arrives after a newer one, the
     * newer epoch must be retained; because the stale response also dropped the
     * remembered leader's broker entry, the current leader resolves to unknown.
     */
    @Test
    public void testLeaderMetadataInconsistentWithBrokerMetadata() {
        // Tests a reordering scenario which can lead to inconsistent leader state.
        // A partition initially has one broker offline. That broker comes online and
        // is elected leader. The client sees these two events in the opposite order.
        TopicPartition tp = new TopicPartition("topic", 0);
        Node node0 = new Node(0, "localhost", 9092);
        Node node1 = new Node(1, "localhost", 9093);
        Node node2 = new Node(2, "localhost", 9094);
        // The first metadata received by broker (epoch=10)
        MetadataResponsePartition firstPartitionMetadata = new MetadataResponsePartition()
            .setPartitionIndex(tp.partition())
            .setErrorCode(Errors.NONE.code())
            .setLeaderEpoch(10)
            .setLeaderId(0)
            .setReplicaNodes(Arrays.asList(0, 1, 2))
            .setIsrNodes(Arrays.asList(0, 1, 2))
            .setOfflineReplicas(Collections.emptyList());
        // The second metadata received has stale metadata (epoch=8)
        MetadataResponsePartition secondPartitionMetadata = new MetadataResponsePartition()
            .setPartitionIndex(tp.partition())
            .setErrorCode(Errors.NONE.code())
            .setLeaderEpoch(8)
            .setLeaderId(1)
            .setReplicaNodes(Arrays.asList(0, 1, 2))
            .setIsrNodes(Arrays.asList(1, 2))
            .setOfflineReplicas(Collections.singletonList(0));
        metadata.updateWithCurrentRequestVersion(new MetadataResponse(new MetadataResponseData()
                .setTopics(buildTopicCollection(tp.topic(), firstPartitionMetadata))
                .setBrokers(buildBrokerCollection(Arrays.asList(node0, node1, node2))),
                ApiKeys.METADATA.latestVersion()),
            false, 10L);
        // The stale response omits node0 from the broker list entirely.
        metadata.updateWithCurrentRequestVersion(new MetadataResponse(new MetadataResponseData()
                .setTopics(buildTopicCollection(tp.topic(), secondPartitionMetadata))
                .setBrokers(buildBrokerCollection(Arrays.asList(node1, node2))),
                ApiKeys.METADATA.latestVersion()),
            false, 20L);
        // The newer epoch (10) wins, but its leader (node0) is no longer a known broker.
        assertNull(metadata.fetch().leaderFor(tp));
        assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));
        assertFalse(metadata.currentLeader(tp).leader.isPresent());
    }
private MetadataResponseTopicCollection buildTopicCollection(String topic, MetadataResponsePartition partitionMetadata) {
MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
.setErrorCode(Errors.NONE.code())
.setName(topic)
.setIsInternal(false);
topicMetadata.setPartitions(Collections.singletonList(partitionMetadata));
MetadataResponseTopicCollection topics = new MetadataResponseTopicCollection();
topics.add(topicMetadata);
return topics;
}
private MetadataResponseBrokerCollection buildBrokerCollection(List<Node> nodes) {
MetadataResponseBrokerCollection brokers = new MetadataResponseBrokerCollection();
for (Node node : nodes) {
MetadataResponseData.MetadataResponseBroker broker = new MetadataResponseData.MetadataResponseBroker()
.setNodeId(node.id())
.setHost(node.host())
.setPort(node.port())
.setRack(node.rack());
brokers.add(broker);
}
return brokers;
}
    /**
     * Exercises merging of successive metadata responses under a retainTopic()
     * filter: retained topics survive a merge, dropped topics (errors, topic IDs,
     * partitions) disappear, and an empty retain set clears all topic state.
     */
    @Test
    public void testMetadataMerge() {
        Time time = new MockTime();
        Map<String, Uuid> topicIds = new HashMap<>();
        final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
        // Only topics in the current retainTopics set survive a metadata merge.
        metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
            @Override
            protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
                return retainTopics.get().contains(topic);
            }
        };
        // Initialize a metadata instance with two topic variants "old" and "keep". Both will be retained.
        String oldClusterId = "oldClusterId";
        int oldNodes = 2;
        Map<String, Errors> oldTopicErrors = new HashMap<>();
        oldTopicErrors.put("oldInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
        oldTopicErrors.put("keepInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
        oldTopicErrors.put("oldUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
        oldTopicErrors.put("keepUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
        Map<String, Integer> oldTopicPartitionCounts = new HashMap<>();
        oldTopicPartitionCounts.put("oldValidTopic", 2);
        oldTopicPartitionCounts.put("keepValidTopic", 3);
        retainTopics.set(Set.of(
            "oldInvalidTopic",
            "keepInvalidTopic",
            "oldUnauthorizedTopic",
            "keepUnauthorizedTopic",
            "oldValidTopic",
            "keepValidTopic"));
        topicIds.put("oldValidTopic", Uuid.randomUuid());
        topicIds.put("keepValidTopic", Uuid.randomUuid());
        MetadataResponse metadataResponse =
            RequestTestUtils.metadataUpdateWithIds(oldClusterId, oldNodes, oldTopicErrors, oldTopicPartitionCounts, _tp -> 100, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
        retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));
        // Verify the initial cluster state: all "old" and "keep" topics present.
        Cluster cluster = metadata.fetch();
        assertEquals(oldClusterId, cluster.clusterResource().clusterId());
        assertEquals(oldNodes, cluster.nodes().size());
        assertEquals(cluster.invalidTopics(), Set.of("oldInvalidTopic", "keepInvalidTopic"));
        assertEquals(cluster.unauthorizedTopics(), Set.of("oldUnauthorizedTopic", "keepUnauthorizedTopic"));
        assertEquals(cluster.topics(), Set.of("oldValidTopic", "keepValidTopic"));
        assertEquals(2, cluster.partitionsForTopic("oldValidTopic").size());
        assertEquals(3, cluster.partitionsForTopic("keepValidTopic").size());
        assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
        // Update the metadata to add a new topic variant, "new", which will be retained with "keep". Note this
        // means that all of the "old" topics should be dropped.
        String newClusterId = "newClusterId";
        int newNodes = oldNodes + 1;
        Map<String, Errors> newTopicErrors = new HashMap<>();
        newTopicErrors.put("newInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
        newTopicErrors.put("newUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
        Map<String, Integer> newTopicPartitionCounts = new HashMap<>();
        newTopicPartitionCounts.put("keepValidTopic", 2);
        newTopicPartitionCounts.put("newValidTopic", 4);
        retainTopics.set(Set.of(
            "keepInvalidTopic",
            "newInvalidTopic",
            "keepUnauthorizedTopic",
            "newUnauthorizedTopic",
            "keepValidTopic",
            "newValidTopic"));
        topicIds.put("newValidTopic", Uuid.randomUuid());
        metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes, newTopicErrors, newTopicPartitionCounts, _tp -> 200, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        topicIds.remove("oldValidTopic");
        Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
        retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));
        assertNull(metadataTopicIds2.get("oldValidTopic"));
        cluster = metadata.fetch();
        assertEquals(newClusterId, cluster.clusterResource().clusterId());
        assertEquals(cluster.nodes().size(), newNodes);
        assertEquals(cluster.invalidTopics(), Set.of("keepInvalidTopic", "newInvalidTopic"));
        assertEquals(cluster.unauthorizedTopics(), Set.of("keepUnauthorizedTopic", "newUnauthorizedTopic"));
        assertEquals(cluster.topics(), Set.of("keepValidTopic", "newValidTopic"));
        assertEquals(2, cluster.partitionsForTopic("keepValidTopic").size());
        assertEquals(4, cluster.partitionsForTopic("newValidTopic").size());
        assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));
        // Perform another metadata update, but this time all topic metadata should be cleared.
        retainTopics.set(Collections.emptySet());
        metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes, newTopicErrors, newTopicPartitionCounts, _tp -> 300, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        Map<String, Uuid> metadataTopicIds3 = metadata.topicIds();
        topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName)));
        cluster = metadata.fetch();
        assertEquals(newClusterId, cluster.clusterResource().clusterId());
        assertEquals(cluster.nodes().size(), newNodes);
        assertEquals(cluster.invalidTopics(), Collections.emptySet());
        assertEquals(cluster.unauthorizedTopics(), Collections.emptySet());
        assertEquals(cluster.topics(), Collections.emptySet());
        assertTrue(cluster.topicIds().isEmpty());
    }
    /**
     * When a later response omits a previously-known topic ID (e.g. a controller
     * running an older IBP), the topic itself must be kept but its ID reverts to
     * {@link Uuid#ZERO_UUID}.
     */
    @Test
    public void testMetadataMergeOnIdDowngrade() {
        Time time = new MockTime();
        Map<String, Uuid> topicIds = new HashMap<>();
        final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
        metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
            @Override
            protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
                return retainTopics.get().contains(topic);
            }
        };
        // Initialize a metadata instance with two topics. Both will be retained.
        String clusterId = "clusterId";
        int nodes = 2;
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        topicPartitionCounts.put("validTopic1", 2);
        topicPartitionCounts.put("validTopic2", 3);
        retainTopics.set(Set.of(
            "validTopic1",
            "validTopic2"));
        topicIds.put("validTopic1", Uuid.randomUuid());
        topicIds.put("validTopic2", Uuid.randomUuid());
        MetadataResponse metadataResponse =
            RequestTestUtils.metadataUpdateWithIds(clusterId, nodes, Collections.emptyMap(), topicPartitionCounts, _tp -> 100, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
        retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));
        // Try removing the topic ID from validTopic1 (simulating receiving a request from a controller with an older IBP)
        topicIds.remove("validTopic1");
        metadataResponse = RequestTestUtils.metadataUpdateWithIds(clusterId, nodes, Collections.emptyMap(), topicPartitionCounts, _tp -> 200, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
        retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));
        Cluster cluster = metadata.fetch();
        // We still have the topic, but it just doesn't have an ID.
        assertEquals(Set.of("validTopic1", "validTopic2"), cluster.topics());
        assertEquals(2, cluster.partitionsForTopic("validTopic1").size());
        assertEquals(new HashSet<>(topicIds.values()), new HashSet<>(cluster.topicIds()));
        assertEquals(Uuid.ZERO_UUID, cluster.topicId("validTopic1"));
    }
    /**
     * updatePartitionLeadership() must change only the targeted partition's
     * leader (and epoch) while leaving the other partitions of the topic intact.
     */
    @Test
    public void testTopicMetadataOnUpdatePartitionLeadership() {
        String topic = "input-topic";
        Uuid topicId = Uuid.randomUuid();
        Time time = new MockTime();
        metadata = new Metadata(
            refreshBackoffMs,
            refreshBackoffMaxMs,
            metadataExpireMs,
            new LogContext(),
            new ClusterResourceListeners());
        Node node1 = new Node(1, "localhost", 9091);
        Node node2 = new Node(2, "localhost", 9091);
        // Both partitions start with node 1 as leader at epoch 1, replicas/ISR {1, 2}.
        TopicPartition tp0 = new TopicPartition(topic, 0);
        MetadataResponse.PartitionMetadata partition0 = new MetadataResponse.PartitionMetadata(
            Errors.NONE,
            tp0,
            Optional.of(1),
            Optional.of(1),
            Arrays.asList(1, 2),
            Arrays.asList(1, 2),
            Collections.emptyList()
        );
        TopicPartition tp1 = new TopicPartition(topic, 1);
        MetadataResponse.PartitionMetadata partition1 =
            new MetadataResponse.PartitionMetadata(
                Errors.NONE,
                tp1,
                Optional.of(1),
                Optional.of(1),
                Arrays.asList(1, 2),
                Arrays.asList(1, 2),
                Collections.emptyList()
            );
        MetadataResponse.TopicMetadata topicMetadata = new MetadataResponse.TopicMetadata(
            Errors.NONE,
            topic,
            topicId,
            false,
            Arrays.asList(partition0, partition1),
            MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED
        );
        // Initialize metadata with two partitions
        MetadataResponse response = RequestTestUtils.metadataResponse(
            Arrays.asList(node1, node2),
            "clusterId",
            node1.id(),
            Collections.singletonList(topicMetadata));
        metadata.updateWithCurrentRequestVersion(
            response,
            false,
            time.milliseconds());
        assertEquals(2, metadata.fetch().partitionsForTopic(topic).size());
        assertEquals(1, metadata.fetch().partition(tp0).leader().id());
        assertEquals(1, metadata.fetch().partition(tp1).leader().id());
        // "input-topic" partition 1 leader changes from node 1 to node 2
        metadata.updatePartitionLeadership(
            Collections.singletonMap(
                tp1,
                new Metadata.LeaderIdAndEpoch(
                    Optional.of(2),
                    Optional.of(3)
                )),
            Collections.singletonList(node1)
        );
        // Partition 0 is untouched; only partition 1 moved to node 2.
        assertEquals(2, metadata.fetch().partitionsForTopic(topic).size());
        assertEquals(1, metadata.fetch().partition(tp0).leader().id());
        assertEquals(2, metadata.fetch().partition(tp1).leader().id());
    }
    /**
     * Test that concurrently updating Metadata, and fetching the corresponding MetadataSnapshot & Cluster work as expected, i.e.
     * snapshot & cluster contain the relevant updates.
     */
    @Test
    public void testConcurrentUpdateAndFetchForSnapshotAndCluster() throws InterruptedException {
        Time time = new MockTime();
        metadata = new Metadata(refreshBackoffMs, refreshBackoffMaxMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners());
        // Setup metadata with 10 nodes, 2 topics, topic1 & 2, both to be retained in the update. Both will have leader-epoch 100.
        int oldNodeCount = 10;
        String topic1 = "test_topic1";
        String topic2 = "test_topic2";
        TopicPartition topic1Part0 = new TopicPartition(topic1, 0);
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        int oldPartitionCount = 1;
        topicPartitionCounts.put(topic1, oldPartitionCount);
        topicPartitionCounts.put(topic2, oldPartitionCount);
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put(topic1, Uuid.randomUuid());
        topicIds.put(topic2, Uuid.randomUuid());
        int oldLeaderEpoch = 100;
        MetadataResponse metadataResponse =
            RequestTestUtils.metadataUpdateWithIds("cluster", oldNodeCount, Collections.emptyMap(), topicPartitionCounts, _tp -> oldLeaderEpoch, topicIds);
        metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
        MetadataSnapshot snapshot = metadata.fetchMetadataSnapshot();
        Cluster cluster = metadata.fetch();
        // Validate metadata snapshot & cluster are setup as expected.
        assertEquals(cluster, snapshot.cluster());
        assertEquals(oldNodeCount, snapshot.cluster().nodes().size());
        assertEquals(oldPartitionCount, snapshot.cluster().partitionCountForTopic(topic1));
        assertEquals(oldPartitionCount, snapshot.cluster().partitionCountForTopic(topic2));
        assertEquals(OptionalInt.of(oldLeaderEpoch), snapshot.leaderEpochFor(topic1Part0));
        // Setup 6 threads, where 3 are updating metadata & 3 are reading snapshot/cluster.
        // Metadata will be updated with higher # of nodes, partition-counts, leader-epoch.
        int numThreads = 6;
        ExecutorService service = Executors.newFixedThreadPool(numThreads);
        CountDownLatch allThreadsDoneLatch = new CountDownLatch(numThreads);
        CountDownLatch atleastMetadataUpdatedOnceLatch = new CountDownLatch(1);
        AtomicReference<MetadataSnapshot> newSnapshot = new AtomicReference<>();
        AtomicReference<Cluster> newCluster = new AtomicReference<>();
        for (int i = 0; i < numThreads; i++) {
            final int id = i + 1;
            service.execute(() -> {
                if (id % 2 == 0) { // Thread to update metadata.
                    String oldClusterId = "clusterId";
                    int nNodes = oldNodeCount + id;
                    Map<String, Integer> newTopicPartitionCounts = new HashMap<>();
                    newTopicPartitionCounts.put(topic1, oldPartitionCount + id);
                    newTopicPartitionCounts.put(topic2, oldPartitionCount + id);
                    MetadataResponse newMetadataResponse =
                        RequestTestUtils.metadataUpdateWithIds(oldClusterId, nNodes, Collections.emptyMap(), newTopicPartitionCounts, _tp -> oldLeaderEpoch + id, topicIds);
                    metadata.updateWithCurrentRequestVersion(newMetadataResponse, true, time.milliseconds());
                    atleastMetadataUpdatedOnceLatch.countDown();
                } else { // Thread to read metadata snapshot, once its updated
                    assertTrue(assertDoesNotThrow(() -> atleastMetadataUpdatedOnceLatch.await(5, TimeUnit.MINUTES)));
                    newSnapshot.set(metadata.fetchMetadataSnapshot());
                    newCluster.set(metadata.fetch());
                }
                allThreadsDoneLatch.countDown();
            });
        }
        assertTrue(allThreadsDoneLatch.await(5, TimeUnit.MINUTES));
        // Validate new snapshot is upto-date. And has higher partition counts, nodes & leader epoch than earlier.
        {
            int newNodeCount = newSnapshot.get().cluster().nodes().size();
            assertTrue(oldNodeCount < newNodeCount, "Unexpected value " + newNodeCount);
            int newPartitionCountTopic1 = newSnapshot.get().cluster().partitionCountForTopic(topic1);
            assertTrue(oldPartitionCount < newPartitionCountTopic1, "Unexpected value " + newPartitionCountTopic1);
            int newPartitionCountTopic2 = newSnapshot.get().cluster().partitionCountForTopic(topic2);
            assertTrue(oldPartitionCount < newPartitionCountTopic2, "Unexpected value " + newPartitionCountTopic2);
            int newLeaderEpoch = newSnapshot.get().leaderEpochFor(topic1Part0).getAsInt();
            assertTrue(oldLeaderEpoch < newLeaderEpoch, "Unexpected value " + newLeaderEpoch);
        }
        // Validate new cluster is upto-date. And has higher partition counts, nodes than earlier.
        {
            int newNodeCount = newCluster.get().nodes().size();
            assertTrue(oldNodeCount < newNodeCount, "Unexpected value " + newNodeCount);
            int newPartitionCountTopic1 = newCluster.get().partitionCountForTopic(topic1);
            assertTrue(oldPartitionCount < newPartitionCountTopic1, "Unexpected value " + newPartitionCountTopic1);
            int newPartitionCountTopic2 = newCluster.get()
                .partitionCountForTopic(topic2);
            assertTrue(oldPartitionCount < newPartitionCountTopic2, "Unexpected value " + newPartitionCountTopic2);
        }
        service.shutdown();
        // The executor service should shut down quickly, as all tasks have finished at this point.
        assertTrue(service.awaitTermination(60, TimeUnit.SECONDS));
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/test/java/org/apache/kafka/clients/MetadataTest.java
|
#!/usr/bin/env python
from shogun import StreamingVwFile
from shogun import StreamingVwCacheFile
from shogun import T_SVMLIGHT
from shogun import StreamingVwFeatures
from shogun import VowpalWabbit
# Default arguments for the example: path to the ascii training data file.
parameter_list=[['../data/fm_train_sparsereal.dat']]
def streaming_vw_createcache (fname):
    """Configure a StreamingVwFile over *fname* that writes a binary cache.

    The file is opened in SVMLight format; reading it through VW will emit
    a binary cache (default name: vw_cache.dat.cache).
    """
    ascii_input = StreamingVwFile(fname)
    # Reading this file will also produce the binary cache on disk.
    ascii_input.set_write_to_cache(True)
    # Declare the on-disk format; T_DENSE and T_VW are also supported.
    ascii_input.set_parser_type(T_SVMLIGHT)
    # Reference steps (kept from the original example) for training from the
    # generated cache — `True' marks the examples as labelled:
    #   features = StreamingVwFeatures(ascii_input, True, 1024)
    #   vw = VowpalWabbit(features)
    #   vw.set_no_training(True)
    #   vw.train()           # runs over all examples, writing the cache
    #   cache_input = StreamingVwCacheFile("vw_cache.dat.cache")
    #   features = StreamingVwFeatures(cache_input, True, 1024)
    #   vw = VowpalWabbit(features)
    #   vw.train()
    #   return vw
if __name__ == "__main__":
    # Run the example with the default data file when executed as a script.
    streaming_vw_createcache(*parameter_list[0])
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
'''
Fourier-transform blocks and related functions.
'''
import os
# Prefer the installed swig bindings; when running from the source/build tree
# they are not on the default path, so fall back to the sibling swig directory.
try:
    from fft_swig import *
except ImportError:
    dirname, filename = os.path.split(os.path.abspath(__file__))
    # Extend the package search path to <pkg>/../../swig and retry the import.
    __path__.append(os.path.join(dirname, "..", "..", "swig"))
    from fft_swig import *
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import _nxos_ip_interface
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosIPInterfaceModule(TestNxosModule):
    """Unit tests for the deprecated ``_nxos_ip_interface`` module."""

    module = _nxos_ip_interface

    def setUp(self):
        super(TestNxosIPInterfaceModule, self).setUp()
        # Patch every collaborator the module reaches out to, keeping both the
        # patcher (for tearDown) and the started mock (for fixtures).
        self.mock_get_interface_mode = patch(
            'ansible.modules.network.nxos._nxos_ip_interface.get_interface_mode')
        self.get_interface_mode = self.mock_get_interface_mode.start()
        self.mock_send_show_command = patch(
            'ansible.modules.network.nxos._nxos_ip_interface.send_show_command')
        self.send_show_command = self.mock_send_show_command.start()
        self.mock_load_config = patch('ansible.modules.network.nxos._nxos_ip_interface.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_capabilities = patch('ansible.modules.network.nxos._nxos_ip_interface.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        self.get_capabilities.return_value = {'network_api': 'cliconf'}

    def tearDown(self):
        super(TestNxosIPInterfaceModule, self).tearDown()
        # Stop every patcher started in setUp.
        for patcher in (self.mock_get_interface_mode,
                        self.mock_send_show_command,
                        self.mock_load_config,
                        self.mock_get_capabilities):
            patcher.stop()

    def load_fixtures(self, commands=None, device=''):
        # Present every interface as a routed (layer3) port backed by the
        # canned running-config fixture; config loads are no-ops.
        self.get_interface_mode.return_value = 'layer3'
        self.send_show_command.return_value = [load_fixture('', '_nxos_ip_interface.cfg')]
        self.load_config.return_value = None

    def test_nxos_ip_interface_ip_present(self):
        # Changing the address replaces the existing one.
        set_module_args(dict(interface='eth2/1', addr='1.1.1.2', mask=8))
        result = self.execute_module(changed=True)
        expected = [
            'interface eth2/1',
            'no ip address 192.0.2.1/8',
            'ip address 1.1.1.2/8',
        ]
        self.assertEqual(result['commands'], expected)

    def test_nxos_ip_interface_ip_idempotent(self):
        # Re-applying the fixture's current address emits no commands.
        set_module_args(dict(interface='eth2/1', addr='192.0.2.1', mask=8))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])

    def test_nxos_ip_interface_ip_absent(self):
        # state=absent removes the configured address.
        set_module_args(dict(interface='eth2/1', state='absent',
                             addr='192.0.2.1', mask=8))
        result = self.execute_module(changed=True)
        expected = ['interface eth2/1', 'no ip address 192.0.2.1/8']
        self.assertEqual(result['commands'], expected)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# stdlib
import unittest
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import load_check
@attr(requires='ssh')
class SshTestCase(unittest.TestCase):
    """Integration tests for ssh_check (requires real network access)."""

    @staticmethod
    def _instance(host, username, password):
        # All test instances share the same port and option defaults.
        return {
            'host': host,
            'port': 22,
            'username': username,
            'password': password,
            'sftp_check': False,
            'private_key_file': '',
            'add_missing_keys': True,
        }

    def test_ssh(self):
        config = {
            'instances': [
                self._instance('io.smashthestack.org', 'level1', 'level1'),
                self._instance('localhost', 'test', 'yodawg'),
                self._instance('wronghost', 'datadog01', 'abcd'),
            ]
        }
        agentConfig = {}
        self.check = load_check('ssh_check', config, agentConfig)

        # A reachable host with valid credentials must report OK with no message.
        self.check.check(config['instances'][0])
        service = self.check.get_service_checks()
        self.assertEqual(service[0].get('status'), AgentCheck.OK)
        self.assertEqual(service[0].get('message'), None)
        self.assertEqual(service[0].get('tags'), ["instance:io.smashthestack.org-22"])

        # Bad credentials must raise.
        self.assertRaises(Exception, self.check.check, config['instances'][1])
        # An unresolvable hostname must raise as well.
        self.assertRaises(Exception, self.check.check, config['instances'][2])
        service_fail = self.check.get_service_checks()
        # The failure must be recorded as a CRITICAL service check.
        self.assertEqual(service_fail[0].get('status'), AgentCheck.CRITICAL)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding=utf-8
from unittest import TestCase
import test_mocks as mocks
from descriptor_tools.decorators import ForcedSet, SecretSet, Binding
from descriptor_tools.set_attrs import *
prefix = "_"
postfix = "_attr"
testString = "aString"
class Mangle_Name_Test(TestCase):
    """mangle_name() wraps a base name with an optional prefix and postfix."""

    def test_defaults(self):
        # No decoration requested: the name comes back untouched.
        self.assertEqual(testString, mangle_name(testString))

    def test_prefix(self):
        self.assertEqual(prefix + testString,
                         mangle_name(testString, prefix=prefix))

    def test_postfix(self):
        self.assertEqual(testString + postfix,
                         mangle_name(testString, postfix=postfix))

    def test_both(self):
        self.assertEqual(prefix + testString + postfix,
                         mangle_name(testString, prefix=prefix, postfix=postfix))
class Name_Mangler_Test(TestCase):
    """NameMangler pre-binds prefix/postfix and applies them when called."""

    def _mangled(self, **kwargs):
        # One round-trip: build the mangler, apply it to the shared test name.
        return NameMangler(**kwargs)(testString)

    def test_defaults(self):
        self.assertEqual(testString, self._mangled())

    def test_prefix(self):
        self.assertEqual(prefix + testString, self._mangled(prefix=prefix))

    def test_postfix(self):
        self.assertEqual(testString + postfix, self._mangled(postfix=postfix))

    def test_both(self):
        self.assertEqual(prefix + testString + postfix,
                         self._mangled(prefix=prefix, postfix=postfix))
class Set_On_Test(TestCase):
    """set_on() must write straight into the target instance's __dict__."""

    class Class:
        pass

    def test_set_on(self):
        target = Set_On_Test.Class()
        set_on(target, "someAttribute", 0)
        self.assertEqual(0, target.__dict__["someAttribute"])
# Shared attribute name used by all Setter tests below (comes from the mocks module).
attrname = mocks.attrname
class Setters_Test(TestCase):
    """Every Setter strategy must store a value retrievable via plain getattr."""

    def _roundtrip(self, setter, descriptor, binding):
        # Build an instance hosting the descriptor, set 0 via the strategy,
        # then read it back through normal attribute access.
        instance = mocks.ClassWithDescriptor(descriptor)
        setter(instance, attrname, 0, binding=binding)
        self.assertEqual(getattr(instance, attrname), 0)

    def test_basic_setting(self):
        self._roundtrip(Setter.basic, mocks.Descriptor(), False)

    def test_forced_setting(self):
        self._roundtrip(Setter.forced, ForcedSet(mocks.Descriptor()), False)

    def test_secret_setting(self):
        self._roundtrip(Setter.secret, SecretSet(mocks.Descriptor()), False)

    def test_basic_setting_with_binding_descriptor(self):
        self._roundtrip(Setter.basic, Binding(mocks.Descriptor()), True)

    def test_forced_setting_with_binding_descriptor(self):
        self._roundtrip(Setter.forced, ForcedSet(Binding(mocks.Descriptor())), True)

    def test_secret_setting_with_binding_descriptor(self):
        self._roundtrip(Setter.secret, SecretSet(Binding(mocks.Descriptor())), True)
# NOTE(review): placeholder suites below — they contain no assertions yet and
# therefore always pass; the TODOs mark planned coverage.
class SetAttribute_Test(TestCase):
    pass # TODO continue
class AttributeSetter_Test(TestCase):
    pass # TODO
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# Copyright (c) 2008-11 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. It is provided for educational
# purposes and is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import sys
def main():
    """Read CSV-ish lines from stdin and emit them as a single HTML table."""
    maxwidth = 100
    print_start()
    count = 0
    while True:
        try:
            line = input()
        except EOFError:
            break
        # First data row gets the green tint; after that rows alternate.
        if count == 0:
            color = "lightgreen"
        else:
            color = "white" if count % 2 else "lightyellow"
        print_line(line, color, maxwidth)
        count += 1
    print_end()
def print_start():
    # Opening tag for the whole document; border kept for plain rendering.
    print("<table border='1'>")
def print_line(line, color, maxwidth):
    """Render one input line as a <tr>, right-aligning numeric cells.

    Numeric cells may use thousands separators ("1,234"); text cells are
    title-cased, truncated to maxwidth, and HTML-escaped.
    """
    print("<tr bgcolor='{0}'>".format(color))
    for field in extract_fields(line):
        if not field:
            print("<td></td>")
            continue
        try:
            value = float(field.replace(",", ""))
        except ValueError:
            # Not a number: treat as text.
            cell = field.title().replace(" And ", " and ")
            if len(cell) <= maxwidth:
                cell = escape_html(cell)
            else:
                cell = "{0} ...".format(escape_html(cell[:maxwidth]))
            print("<td>{0}</td>".format(cell))
        else:
            print("<td align='right'>{0:d}</td>".format(round(value)))
    print("</tr>")
def extract_fields(line):
    """Split a CSV-style line on commas, honoring '...' and "..." quoting.

    Quote characters are consumed (not kept); the opposite quote character
    inside a quoted run is treated as literal text. A trailing empty field
    is dropped.
    """
    fields = []
    current = ""
    open_quote = None
    for ch in line:
        if ch in "\"'":
            if open_quote is None:
                open_quote = ch        # entering a quoted run
            elif open_quote == ch:
                open_quote = None      # leaving the quoted run
            else:
                current += ch          # the other quote char is literal
        elif ch == "," and open_quote is None:
            fields.append(current)
            current = ""
        else:
            current += ch
    if current:
        fields.append(current)
    return fields
def escape_html(text):
    """Escape the three HTML-significant characters in *text*.

    '&' must be replaced first; otherwise the '&' introduced by the other
    entity substitutions would itself get re-escaped.
    """
    # BUG FIX: the replacement strings were identical to the search strings
    # (the entity names had been lost), making all three calls no-ops.
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text
def print_end():
    # Close the table opened by print_start().
    print("</table>")
if __name__ == "__main__":
    # Only run when executed as a script; importing the module (e.g. for
    # testing extract_fields/escape_html) no longer consumes stdin.
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime_test
import (
"testing"
)
// NOTE(review): package-level sinks, unused in the visible test below —
// presumably kept so benchmark/assignment targets survive dead-code
// elimination elsewhere; confirm before removing.
var res int64
var ures uint64
// TODO: This test probably should be in a different place.
// TestFloatTruncation checks float64 -> int64/uint64 conversions at every
// interesting boundary: the signed/unsigned maxima, the largest float64
// value below each truncation point, and their neighbors. For out-of-range
// inputs it asserts the wasm behavior (saturation to the destination
// type's min/max), as noted inline below.
func TestFloatTruncation(t *testing.T) {
	testdata := []struct {
		input      float64
		convInt64  int64
		convUInt64 uint64
		overflow   bool // declared but not consulted by the assertions below
	}{
		// max +- 1
		{
			input:      0x7fffffffffffffff,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0x8000000000000000,
		},
		// For out-of-bounds conversion, the result is implementation-dependent.
		// This test verifies the implementation of wasm architecture, which is,
		// saturating to the min/max value.
		{
			input:      0x8000000000000000,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0x8000000000000000,
		},
		{
			input:      0x7ffffffffffffffe,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0x8000000000000000,
		},
		// neg max +- 1
		{
			input:      -0x8000000000000000,
			convInt64:  -0x8000000000000000,
			convUInt64: 0,
		},
		{
			input:      -0x8000000000000001,
			convInt64:  -0x8000000000000000,
			convUInt64: 0,
		},
		{
			input:      -0x7fffffffffffffff,
			convInt64:  -0x8000000000000000,
			convUInt64: 0,
		},
		// trunc point +- 1
		{
			input:      0x7ffffffffffffdff,
			convInt64:  0x7ffffffffffffc00,
			convUInt64: 0x7ffffffffffffc00,
		},
		{
			input:      0x7ffffffffffffe00,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0x8000000000000000,
		},
		{
			input:      0x7ffffffffffffdfe,
			convInt64:  0x7ffffffffffffc00,
			convUInt64: 0x7ffffffffffffc00,
		},
		// neg trunc point +- 1
		{
			input:      -0x7ffffffffffffdff,
			convInt64:  -0x7ffffffffffffc00,
			convUInt64: 0,
		},
		{
			input:      -0x7ffffffffffffe00,
			convInt64:  -0x8000000000000000,
			convUInt64: 0,
		},
		{
			input:      -0x7ffffffffffffdfe,
			convInt64:  -0x7ffffffffffffc00,
			convUInt64: 0,
		},
		// umax +- 1
		{
			input:      0xffffffffffffffff,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xffffffffffffffff,
		},
		{
			input:      0x10000000000000000,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xffffffffffffffff,
		},
		{
			input:      0xfffffffffffffffe,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xffffffffffffffff,
		},
		// umax trunc +- 1
		{
			input:      0xfffffffffffffbff,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xfffffffffffff800,
		},
		{
			input:      0xfffffffffffffc00,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xffffffffffffffff,
		},
		{
			input:      0xfffffffffffffbfe,
			convInt64:  0x7fffffffffffffff,
			convUInt64: 0xfffffffffffff800,
		},
	}
	for _, item := range testdata {
		if got, want := int64(item.input), item.convInt64; got != want {
			t.Errorf("int64(%f): got %x, want %x", item.input, got, want)
		}
		if got, want := uint64(item.input), item.convUInt64; got != want {
			t.Errorf("uint64(%f): got %x, want %x", item.input, got, want)
		}
	}
}
|
go
|
github
|
https://github.com/golang/go
|
src/runtime/conv_wasm_test.go
|
class Param(object):
    """One <param> element from the XML interface spec.

    Subclasses registered via register_type() specialize the C-side and
    scripting-side type mapping; factory() dispatches on the element's
    ``type`` attribute.
    """

    # type-name -> Param subclass, filled in by register_type() calls at
    # module bottom.
    mapping = {}

    def __init__(self, node):
        if node.tag != 'param':
            raise ValueError('expected <param>, got <%s>' % node.tag)
        self.name = node.attrib['name']
        try:
            descnode = node.find('description')
            self.description = '' if descnode is None else descnode.text
        except AttributeError:
            # Node types without find() (e.g. synthetic nodes) get no description.
            self.description = ''
        self.dtype = node.attrib['type']
        self.default = node.attrib.get('default', None)
        self.skip = node.attrib.get('skip', 'false').lower() in ('true', 'yes', '1')
        # Whether generated marshalling code reads/writes this param.
        self.write_in = True
        self.write_out = True

    def mandatory(self):
        """True when the param has no default and must always be supplied."""
        return self.default is None

    def optional(self):
        return self.default is not None

    def ctype(self):
        """C-side type spelling; base class passes the XML type through."""
        return self.dtype

    def ctype_normalized(self):
        # '::' is illegal inside identifiers, so flatten namespace separators.
        return self.ctype().replace('::', '__')

    def htype(self):
        """Scripting-side type name; subclasses override."""
        return self.dtype

    def cdefault(self):
        """Default value rendered as a C literal (None when there is none)."""
        return self.default

    def hdefault(self):
        """Default value rendered as a script literal (None when there is none)."""
        return self.default

    def argmod(self):
        """Modifier appended to the C type in argument lists ('' or '&')."""
        return ''

    @staticmethod
    def register_type(dtype, clazz):
        Param.mapping[dtype] = clazz

    @staticmethod
    def factory(node):
        """Instantiate the registered subclass for the node's type attribute.

        Raises KeyError for unknown types — the same failure mode as before,
        but now raised explicitly after the diagnostic instead of falling
        through to an accidental dict-lookup crash.
        """
        dtype = node.attrib['type']
        if dtype not in Param.mapping:
            print('ERROR: type "{}" not found in mapping; valid types are: {}'.format(
                dtype, ', '.join('"%s"' % k for k in Param.mapping.keys())))
            raise KeyError(dtype)
        return Param.mapping[dtype](node)

    def declaration(self):
        """C declaration, e.g. 'int count'."""
        return '{} {}'.format(self.ctype(), self.name)

    def declaration_with_default(self):
        """C declaration with '= default' appended when a default exists."""
        return self.declaration() + (' = {}'.format(self.cdefault()) if self.cdefault() else '')
class ParamInt(Param):
    """Integer parameter: C type passes through, scripted as 'number'."""

    def htype(self):
        return 'number'
class ParamFloat(Param):
    """Float parameter: C type passes through, scripted as 'number'."""

    def htype(self):
        return 'number'
class ParamDouble(Param):
    """Double parameter: C type passes through, scripted as 'number'."""

    def htype(self):
        return 'number'
class ParamString(Param):
    """String parameter: std::string on the C++ side, quoted literals for defaults."""

    def ctype(self):
        return 'std::string'

    def cdefault(self):
        """Default as a double-quoted C string literal (backslashes, then quotes, escaped)."""
        if self.default is None:
            return None
        return '"%s"' % self.default.replace('\\', '\\\\').replace('"', '\\"')

    def hdefault(self):
        """Default as a single-quoted script literal.

        BUG FIX: the literal is single-quoted, so it is single quotes — not
        double quotes — that must be escaped; previously a default containing
        an apostrophe produced a broken literal.
        """
        if self.default is None:
            return None
        return "'%s'" % self.default.replace('\\', '\\\\').replace("'", "\\'")
class ParamBool(Param):
    # No specialization yet: inherits Param's passthrough mapping, so the
    # XML type name is emitted verbatim for both the C and script sides.
    def __init__(self, node):
        super(ParamBool, self).__init__(node)
class ParamTable(Param):
    """List/table parameter mapped to std::vector<item-type>.

    Size constraints come from minsize/maxsize, or a fixed 'size' attribute
    which sets both. Without an item-type the param degrades to an opaque
    'void *' and is excluded from generated read/write code.
    """
    def __init__(self, node):
        super(ParamTable, self).__init__(node)
        self.itype = node.attrib.get('item-type', None)
        self.minsize = int(node.attrib.get('minsize', 0))
        self.maxsize = int(node.attrib['maxsize']) if 'maxsize' in node.attrib else None
        # 'size' forces an exact length, overriding minsize/maxsize.
        if 'size' in node.attrib:
            self.minsize = int(node.attrib['size'])
            self.maxsize = int(node.attrib['size'])
        if self.itype is None:
            # Element type unknown -> contents cannot be marshalled.
            self.write_in = False
            self.write_out = False
    def item_dummy(self):
        # Fabricate a minimal node-like object so Param.factory() yields a
        # Param describing the element type.
        n = type('dummyNode', (object,), dict(tag='param', attrib={'name': 'dummy', 'type': self.itype}))
        return Param.factory(n)
    def ctype(self):
        if self.itype is not None:
            return 'std::vector<%s>' % self.item_dummy().ctype()
        else:
            return 'void *'
    def ctype_normalized(self):
        # NOTE(review): unlike ctype(), this path does not guard against
        # itype being None — presumably never reached for opaque tables;
        # confirm with callers.
        return self.item_dummy().ctype().replace('::', '__')
    def htype(self):
        # Fixed-minimum tables advertise the size, e.g. 'table_3'.
        return 'table' + ('_%d' % self.minsize if self.minsize else '')
    def cdefault(self):
        # Renders "{1, 2}"-style defaults via boost::assign; implicitly
        # returns None when there is no default.
        if self.default is not None:
            d = self.default
            d = 'boost::assign::list_of{}.convert_to_container<{} >()'.format(''.join(map(lambda x: '(%s)' % x.strip(), d.strip()[1:-1].split(','))), self.ctype())
            return d
class ParamStruct(Param):
    """Named-structure parameter; only '{}' is accepted as a default."""

    def __init__(self, node, name):
        super(ParamStruct, self).__init__(node)
        self.structname = name
        # '{}' means "default-constructed struct"; any other default is invalid.
        self.xoptional = self.default == '{}'
        if self.default is not None and not self.xoptional:
            raise ValueError('default value not supported in <struct>')

    def mandatory(self):
        return not self.xoptional

    def optional(self):
        return self.xoptional

    def cdefault(self):
        # Struct defaults are never rendered inline.
        return None

    def argmod(self):
        # Structs are passed by reference.
        return '&'
# Populate the dispatch table used by Param.factory(). ParamStruct is not
# registered here: its constructor needs the extra struct name argument.
Param.register_type('int', ParamInt)
Param.register_type('float', ParamFloat)
Param.register_type('double', ParamDouble)
Param.register_type('string', ParamString)
Param.register_type('bool', ParamBool)
Param.register_type('table', ParamTable)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# DS18x20 temperature sensor driver for MicroPython.
# MIT license; Copyright (c) 2016 Damien P. George
from micropython import const
# DS18x20 function command bytes (sent after ROM selection).
_CONVERT = const(0x44)     # start a temperature conversion
_RD_SCRATCH = const(0xBE)  # read the 9-byte scratchpad
_WR_SCRATCH = const(0x4E)  # write the scratchpad
class DS18X20:
    """Driver for DS18S20 (family 0x10), DS1822 (0x22) and DS18B20 (0x28)
    temperature sensors on a 1-Wire bus."""

    def __init__(self, onewire):
        self.ow = onewire
        # Reusable 9-byte scratchpad buffer (8 data bytes + CRC byte).
        self.buf = bytearray(9)

    def scan(self):
        """Return the ROM codes of supported temperature sensors on the bus."""
        return [rom for rom in self.ow.scan() if rom[0] in (0x10, 0x22, 0x28)]

    def convert_temp(self):
        """Start a conversion on ALL sensors at once (SKIP ROM broadcast).

        Callers must wait the device's conversion time before calling
        read_temp() — up to 750 ms per the datasheet; confirm for your part.
        """
        self.ow.reset(True)
        self.ow.writebyte(self.ow.SKIP_ROM)
        self.ow.writebyte(_CONVERT)

    def read_scratch(self, rom):
        """Read and CRC-check one sensor's 9-byte scratchpad; raise on CRC error."""
        self.ow.reset(True)
        self.ow.select_rom(rom)
        self.ow.writebyte(_RD_SCRATCH)
        self.ow.readinto(self.buf)
        # crc8 over all 9 bytes is zero when byte 8 matches bytes 0-7.
        if self.ow.crc8(self.buf):
            raise Exception("CRC error")
        return self.buf

    def write_scratch(self, rom, buf):
        """Write *buf* into one sensor's scratchpad (alarm/config bytes)."""
        self.ow.reset(True)
        self.ow.select_rom(rom)
        self.ow.writebyte(_WR_SCRATCH)
        self.ow.write(buf)

    def read_temp(self, rom):
        """Return the last converted temperature in degrees Celsius."""
        buf = self.read_scratch(rom)
        if rom[0] == 0x10:
            # DS18S20: buf[0] holds the 9-bit reading (LSB = half degrees),
            # buf[1] is the sign byte.
            if buf[1]:
                t = buf[0] >> 1 | 0x80
                t = -((~t + 1) & 0xFF)  # two's-complement negative value
            else:
                t = buf[0] >> 1
            # Extended resolution using COUNT_REMAIN (buf[6]) and
            # COUNT_PER_C (buf[7]) per the DS18S20 datasheet.
            return t - 0.25 + (buf[7] - buf[6]) / buf[7]
        else:
            # DS18B20/DS1822: signed 16-bit value, 1/16 degC per LSB.
            t = buf[1] << 8 | buf[0]
            if t & 0x8000: # sign bit set
                t = -((t ^ 0xFFFF) + 1)
            return t / 16
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volumes.volume_types.qos_specs \
import forms as project_forms
from openstack_dashboard.dashboards.admin.volumes.volume_types.qos_specs \
import tables as project_tables
class QosSpecMixin(object):
    """Adds QoS-spec context (id, name, edit key) shared by the views below."""

    @memoized
    def get_context_data(self, **kwargs):
        # NOTE(review): @memoized caches per argument tuple — presumably to
        # avoid repeating the cinder lookup within a request; confirm.
        context = super(QosSpecMixin, self).get_context_data(**kwargs)
        # Note the use of self.kwargs instead of the parameter kwargs.
        # This is needed for consistency when dealing with both
        # index views and forms (i,e, EditView). With forms,
        # the parameter kwargs contains form layout information,
        # not key data that is needed here.
        if 'key' in self.kwargs:
            # needed for edit function
            context['key'] = self.kwargs['key']
        if 'qos_spec_id' in self.kwargs:
            context['qos_spec_id'] = self.kwargs['qos_spec_id']
            qos_spec_id = context['qos_spec_id']
            try:
                qos_list = api.cinder.qos_spec_get(self.request, qos_spec_id)
                context['qos_spec_name'] = qos_list.name
            except Exception:
                # The name is cosmetic (page titles); degrade instead of failing.
                context['qos_spec_name'] = _('undefined')
        return context
class IndexView(QosSpecMixin, forms.ModalFormMixin, tables.DataTableView):
    """List the key/value specs attached to one QoS spec."""
    table_class = project_tables.SpecsTable
    template_name = 'admin/volumes/volume_types/qos_specs/index.html'
    page_title = _("QoS Spec: {{ qos_spec_name }}")

    def get_data(self):
        """Fetch the spec's keys; on failure show an error and an empty table."""
        try:
            qos_spec_id = self.kwargs['qos_spec_id']
            qos_list = api.cinder.qos_spec_get_keys(self.request, qos_spec_id)
        except Exception:
            qos_list = []
            exceptions.handle(self.request,
                              _('Unable to retrieve QoS spec list.'))
        return qos_list
class CreateKeyValuePairView(QosSpecMixin, forms.ModalFormView):
    """Modal form adding one spec key/value pair to an existing QoS spec."""
    # this for creating a spec key-value pair for an existing QOS Spec
    form_class = project_forms.CreateKeyValuePair
    form_id = "extra_spec_create_form"
    modal_header = _("Create Spec")
    modal_id = "qos_spec_create_modal"
    template_name = 'admin/volumes/volume_types/qos_specs/create.html'
    submit_label = _("Create")
    submit_url = "horizon:admin:volumes:volume_types:qos_specs:create"
    url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("Spec: {{ qos_spec_name }}")

    def get_initial(self):
        # The form only needs to know which QoS spec it is extending.
        qos_spec_id = self.kwargs['qos_spec_id']
        return {'qos_spec_id': qos_spec_id}

    def get_success_url(self):
        return reverse(self.url)

    def get_context_data(self, **kwargs):
        """Resolve the submit/cancel URLs for the modal template."""
        context = super(CreateKeyValuePairView, self).\
            get_context_data(**kwargs)
        args = (self.kwargs['qos_spec_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        context['cancel_url'] = reverse(self.url)
        return context
class EditKeyValuePairView(QosSpecMixin, forms.ModalFormView):
    """Modal form editing the value of one existing QoS-spec key."""
    form_class = project_forms.EditKeyValuePair
    form_id = "qos_spec_edit_form"
    modal_header = _("Edit Spec Value")
    modal_id = "qos_spec_edit_modal"
    template_name = 'admin/volumes/volume_types/qos_specs/edit.html'
    submit_label = _("Save")
    submit_url = "horizon:admin:volumes:volume_types:qos_specs:edit"
    url = 'horizon:admin:volumes:volume_types_tab'
    page_title = _("QoS Spec: {{ qos_spec_name }}")

    def get_success_url(self):
        return reverse(self.url)

    def get_initial(self):
        """Seed the form with the spec id, key, and the key's current value."""
        qos_spec_id = self.kwargs['qos_spec_id']
        key = self.kwargs['key']
        try:
            qos_specs = api.cinder.qos_spec_get(self.request, qos_spec_id)
        except Exception:
            # NOTE(review): if handle() returns instead of aborting, the
            # {} fallback has no .specs attribute and the return below would
            # raise AttributeError — presumably handle() redirects; confirm.
            qos_specs = {}
            exceptions.handle(self.request,
                              _('Unable to retrieve QoS spec '
                                'details.'))
        return {'qos_spec_id': qos_spec_id,
                'key': key,
                'value': qos_specs.specs.get(key, '')}

    def get_context_data(self, **kwargs):
        """Resolve the submit/cancel URLs for the modal template."""
        context = super(EditKeyValuePairView, self).get_context_data(**kwargs)
        args = (self.kwargs['qos_spec_id'], self.kwargs['key'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        context['cancel_url'] = reverse(self.url)
        return context
|
unknown
|
codeparrot/codeparrot-clean
| ||
#pragma once
#include <ATen/core/Tensor.h>
namespace at::native::mobile {

// Presumably returns `input` unchanged when it already satisfies the
// padding/contiguity requirements, otherwise a padded contiguous copy —
// confirm against the definition in Factory.cpp.
Tensor allocate_padded_contiguous_if_needed(
    const Tensor& input,
    c10::MemoryFormat memory_format);

// TODO: Remove this function when at::native::empty() is modified to accept a
// custom memory allocator.
at::Tensor empty_with_tail_padding(
    IntArrayRef size,
    const caffe2::TypeMeta dtype,
    c10::MemoryFormat memory_format,
    std::optional<DimnameList> maybe_names);

} // namespace at::native::mobile
|
c
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/utils/Factory.h
|
#!/usr/bin/env python3
import json
from pathlib import Path
from os import getenv
from sys import argv
def merge_image_info(output, image_info):
    """Merge one per-image JSON blob into the combined *output* dict.

    The first blob seeds the whole structure; afterwards each blob's single
    profile either introduces a new device entry or appends its first image
    to the existing device's image list.
    """
    if not output:
        output.update(image_info)
        return
    # get first (and only) profile in the json file
    device_id = next(iter(image_info["profiles"].keys()))
    if device_id not in output["profiles"]:
        output["profiles"].update(image_info["profiles"])
    else:
        output["profiles"][device_id]["images"].append(
            image_info["profiles"][device_id]["images"][0]
        )


def main():
    """Combine every per-image *.json in $WORK_DIR into one compact info file."""
    if len(argv) != 2:
        # FIX: "ouput" typo; SystemExit keeps the non-zero exit status.
        print("JSON info files script requires output file as argument")
        raise SystemExit(1)
    output_path = Path(argv[1])
    # FIX: the original used `assert`, which is stripped under -O.
    work_dir = getenv("WORK_DIR")
    if not work_dir:
        raise SystemExit("$WORK_DIR required")
    output = {}
    for json_file in Path(work_dir).glob("*.json"):
        merge_image_info(output, json.loads(json_file.read_text()))
    if output:
        output_path.write_text(json.dumps(output, sort_keys=True, separators=(",", ":")))
    else:
        print("JSON info file script could not find any JSON files for target")


if __name__ == "__main__":
    # Guarded so importing the module (e.g. to reuse merge_image_info)
    # has no side effects.
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from tests.api import auth_for
from tests.data import users
def test_delete_user_as_user(client, test_user):
    """A regular user may not delete accounts — not even their own."""
    url = "/users/{id}".format(id=test_user.id)
    response = client.delete(url, headers=auth_for(test_user))
    assert response.status_code == 403
def test_delete_user_as_admin(client, test_user, test_admin):
    """An admin can delete a user, after which the record is gone."""
    url = "/users/{id}".format(id=test_user.id)
    # Sanity check: the user exists before deletion.
    assert client.get(url).status_code == 200
    response = client.delete(url, headers=auth_for(test_admin))
    assert response.status_code == 200
    assert response.json == {}
    # The record must no longer be retrievable.
    assert client.get(url).status_code == 404
def test_delete_user_as_other_user(db_session, client, test_user):
    """A non-admin user may not delete somebody else's account."""
    other = users.john()
    db_session.add(other)
    db_session.commit()
    response = client.delete("/users/{id}".format(id=test_user.id),
                             headers=auth_for(other))
    assert response.status_code == 403
def test_delete_missing_user(client, test_admin):
    """Deleting a nonexistent id yields 404, even for admins."""
    response = client.delete("/users/12345", headers=auth_for(test_admin))
    assert response.status_code == 404
def test_delete_user_with_invalid_id(client, test_admin):
    """A non-numeric id does not match the route and yields 404."""
    response = client.delete("/users/abc", headers=auth_for(test_admin))
    assert response.status_code == 404
def test_delete_user_without_authentication(client, test_user):
    """An unauthenticated delete is rejected with 401."""
    response = client.delete("/users/{id}".format(id=test_user.id))
    assert response.status_code == 401
def test_delete_user_invalidates_auth_tokens(client, test_user, test_admin):
    """Deleting a user must immediately revoke that user's auth tokens."""
    # FIX: the original called "/settings".format(id=...) — a no-op .format()
    # on a literal without placeholders; the path is simply "/settings".
    res = client.get("/settings", headers=auth_for(test_user))
    assert res.status_code == 200
    res = client.delete(
        "/users/{id}".format(id=test_user.id), headers=auth_for(test_admin)
    )
    assert res.status_code == 200
    assert res.json == {}
    # The deleted user's token no longer authenticates.
    res = client.get("/settings", headers=auth_for(test_user))
    assert res.status_code == 401
|
unknown
|
codeparrot/codeparrot-clean
| ||
import random
import sys
import Pyro4
from Pyro4.util import SerializerBase
from workitem import Workitem
# For 'workitem.Workitem' we register a deserialization hook to be able to get these back from Pyro
# Teach Pyro's serializer how to rebuild Workitem instances coming back from
# remote calls (they travel over the wire as plain dicts).
SerializerBase.register_dict_to_class("workitem.Workitem", Workitem.from_dict)
# Total number of work items placed on the dispatcher queue.
NUMBER_OF_ITEMS = 40
def main():
    """Fill the dispatcher with work, then gather and print all results."""
    print("\nThis program will calculate Prime Factorials of a bunch of random numbers.")
    print("The more workers you will start (on different cpus/cores/machines),")
    print("the faster you will get the complete list of results!\n")
    with Pyro4.core.Proxy("PYRONAME:example.distributed.dispatcher") as dispatcher:
        placework(dispatcher)
        printresults(collectresults(dispatcher))
def placework(dispatcher):
    """Queue NUMBER_OF_ITEMS random composite numbers as work items."""
    print("placing work items into dispatcher queue.")
    py2 = sys.version_info < (3, 0)
    for seq in range(1, NUMBER_OF_ITEMS + 1):
        if py2:
            # python 2.x range arguments needs to be within C int range
            number = random.randint(3211, 12000) * random.randint(3211, 11000)
        else:
            # python 3.x allows arbitrary size range
            number = random.randint(3211, 4999999) * random.randint(3211, 999999)
        dispatcher.putWork(Workitem(seq, number))
def collectresults(dispatcher):
    """Poll the dispatcher until every work item has produced a result.

    Returns a dict mapping each input number to its factor list.
    """
    print("getting results from dispatcher queue.")
    numbers = {}
    while len(numbers) < NUMBER_OF_ITEMS:
        try:
            item = dispatcher.getResult()
        except ValueError:
            # No result ready yet; report progress and keep polling.
            print("Not all results available yet (got %d out of %d). Work queue size: %d" %
                  (len(numbers), NUMBER_OF_ITEMS, dispatcher.workQueueSize()))
        else:
            print("Got result: %s (from %s)" % (item, item.processedBy))
            numbers[item.data] = item.result
    # Everything is accounted for; a non-empty result queue would be a bug.
    if dispatcher.resultQueueSize() > 0:
        print("there's still stuff in the dispatcher result queue, that is odd...")
    return numbers
def printresults(numbers):
    """Dump the {number: factors} mapping, one line per number."""
    print("\nComputed Prime Factorials follow:")
    for number in numbers:
        print("%d --> %s" % (number, numbers[number]))
if __name__ == "__main__":
main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*global DateTimeShortcuts, SelectFilter*/
/**
* Django admin inlines
*
* Based on jQuery Formset 1.1
* @author Stanislaus Madueke (stan DOT madueke AT gmail DOT com)
* @requires jQuery 1.2.6 or later
*
* Copyright (c) 2009, Stanislaus Madueke
* All rights reserved.
*
* Spiced up with Code from Zain Memon's GSoC project 2009
* and modified for Django by Jannis Leidel, Travis Swicegood and Julien Phalip.
*
* Licensed under the New BSD License
* See: https://opensource.org/licenses/bsd-license.php
*/
'use strict';
{
const $ = django.jQuery;
$.fn.formset = function(opts) {
    // Manage one Django inline formset: wire up the "add another"/delete
    // controls, renumber form prefixes, and keep the management form's
    // TOTAL_FORMS input in sync with the DOM.
    const options = $.extend({}, $.fn.formset.defaults, opts);
    const $this = $(this);
    const $parent = $this.parent();
    // Rewrite the for/id/name attributes of one element from
    // "<prefix>-<oldIndex>" (or the "__prefix__" placeholder) to
    // "<prefix>-<ndx>".
    const updateElementIndex = function(el, prefix, ndx) {
        const id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))");
        const replacement = prefix + "-" + ndx;
        if ($(el).prop("for")) {
            $(el).prop("for", $(el).prop("for").replace(id_regex, replacement));
        }
        if (el.id) {
            el.id = el.id.replace(id_regex, replacement);
        }
        if (el.name) {
            el.name = el.name.replace(id_regex, replacement);
        }
    };
    // Management-form inputs; autocomplete is turned off so the browser
    // cannot restore stale counts on back/refresh.
    const totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS").prop("autocomplete", "off");
    let nextIndex = parseInt(totalForms.val(), 10);
    const maxForms = $("#id_" + options.prefix + "-MAX_NUM_FORMS").prop("autocomplete", "off");
    const minForms = $("#id_" + options.prefix + "-MIN_NUM_FORMS").prop("autocomplete", "off");
    let addButton;
    /**
     * The "Add another MyModel" button below the inline forms.
     */
    const addInlineAddButton = function() {
        // By the time this runs, addButton === options.addButton (see the
        // initialization below), so null means "create our own button".
        if (addButton === null) {
            if ($this.prop("tagName") === "TR") {
                // If forms are laid out as table rows, insert the
                // "add" button in a new table row:
                const numCols = $this.eq(-1).children().length;
                $parent.append('<tr class="' + options.addCssClass + '"><td colspan="' + numCols + '"><a role="button" class="addlink" href="#">' + options.addText + "</a></tr>");
                addButton = $parent.find("tr:last a");
            } else {
                // Otherwise, insert it immediately after the last form:
                $this.filter(":last").after('<div class="' + options.addCssClass + '"><a role="button" class="addlink" href="#">' + options.addText + "</a></div>");
                addButton = $this.filter(":last").next().find("a");
            }
        }
        addButton.on('click', addInlineClickHandler);
    };
    const addInlineClickHandler = function(e) {
        e.preventDefault();
        // Clone the hidden "<prefix>-empty" template form and renumber it.
        const template = $("#" + options.prefix + "-empty");
        const row = template.clone(true);
        row.removeClass(options.emptyCssClass)
            .addClass(options.formCssClass)
            .attr("id", options.prefix + "-" + nextIndex);
        addInlineDeleteButton(row);
        row.find("*").each(function() {
            updateElementIndex(this, options.prefix, totalForms.val());
        });
        // Insert the new form when it has been fully edited.
        row.insertBefore($(template));
        // Update number of total forms.
        $(totalForms).val(parseInt(totalForms.val(), 10) + 1);
        nextIndex += 1;
        // Hide the add button if there's a limit and it's been reached.
        if ((maxForms.val() !== '') && (maxForms.val() - totalForms.val()) <= 0) {
            addButton.parent().hide();
        }
        // Show the remove buttons if there are more than min_num.
        toggleDeleteButtonVisibility(row.closest('.inline-group'));
        // Pass the new form to the post-add callback, if provided.
        if (options.added) {
            options.added(row);
        }
        row.get(0).dispatchEvent(new CustomEvent("formset:added", {
            bubbles: true,
            detail: {
                formsetName: options.prefix
            }
        }));
    };
    /**
     * The "X" button that is part of every unsaved inline.
     * (When saved, it is replaced with a "Delete" checkbox.)
     */
    const addInlineDeleteButton = function(row) {
        if (row.is("tr")) {
            // If the forms are laid out in table rows, insert
            // the remove button into the last table cell:
            row.children(":last").append('<div><a role="button" class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></div>");
        } else if (row.is("ul") || row.is("ol")) {
            // If they're laid out as an ordered/unordered list,
            // insert an <li> after the last list item:
            row.append('<li><a role="button" class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></li>");
        } else {
            // Otherwise, just insert the remove button as the
            // last child element of the form's container:
            row.children(":first").append('<span><a role="button" class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></span>");
        }
        // Add delete handler for each row.
        row.find("a." + options.deleteCssClass).on('click', inlineDeleteHandler.bind(this));
    };
    const inlineDeleteHandler = function(e1) {
        e1.preventDefault();
        const deleteButton = $(e1.target);
        const row = deleteButton.closest('.' + options.formCssClass);
        const inlineGroup = row.closest('.inline-group');
        // Remove the parent form containing this button,
        // and also remove the relevant row with non-field errors:
        const prevRow = row.prev();
        if (prevRow.length && prevRow.hasClass('row-form-errors')) {
            prevRow.remove();
        }
        row.remove();
        nextIndex -= 1;
        // Pass the deleted form to the post-delete callback, if provided.
        // Note: row is already detached from the document at this point.
        if (options.removed) {
            options.removed(row);
        }
        document.dispatchEvent(new CustomEvent("formset:removed", {
            detail: {
                formsetName: options.prefix
            }
        }));
        // Update the TOTAL_FORMS form count.
        const forms = $("." + options.formCssClass);
        $("#id_" + options.prefix + "-TOTAL_FORMS").val(forms.length);
        // Show add button again once below maximum number.
        if ((maxForms.val() === '') || (maxForms.val() - forms.length) > 0) {
            addButton.parent().show();
        }
        // Hide the remove buttons if at min_num.
        toggleDeleteButtonVisibility(inlineGroup);
        // Also, update names and ids for all remaining form controls so
        // they remain in sequence:
        let i, formCount;
        const updateElementCallback = function() {
            updateElementIndex(this, options.prefix, i);
        };
        for (i = 0, formCount = forms.length; i < formCount; i++) {
            updateElementIndex($(forms).get(i), options.prefix, i);
            $(forms.get(i)).find("*").each(updateElementCallback);
        }
    };
    // Remove buttons are hidden while the form count is at or below MIN_NUM.
    const toggleDeleteButtonVisibility = function(inlineGroup) {
        if ((minForms.val() !== '') && (minForms.val() - totalForms.val()) >= 0) {
            inlineGroup.find('.inline-deletelink').hide();
        } else {
            inlineGroup.find('.inline-deletelink').show();
        }
    };
    // Tag every existing (non-template) form with the formset class.
    $this.each(function(i) {
        $(this).not("." + options.emptyCssClass).addClass(options.formCssClass);
    });
    // Create the delete buttons for all unsaved inlines:
    $this.filter('.' + options.formCssClass + ':not(.has_original):not(.' + options.emptyCssClass + ')').each(function() {
        addInlineDeleteButton($(this));
    });
    toggleDeleteButtonVisibility($this);
    // Create the add button, initially hidden.
    addButton = options.addButton;
    addInlineAddButton();
    // Show the add button if allowed to add more items.
    // Note that max_num = None translates to a blank string.
    const showAddButton = maxForms.val() === '' || (maxForms.val() - totalForms.val()) > 0;
    if ($this.length && showAddButton) {
        addButton.parent().show();
    } else {
        addButton.parent().hide();
    }
    return this;
};
/* Setup plugin defaults */
// Callers override any of these via the opts argument to $.fn.formset().
$.fn.formset.defaults = {
    prefix: "form", // The form prefix for your django formset
    addText: "add another", // Text for the add link
    deleteText: "remove", // Text for the delete link
    addCssClass: "add-row", // CSS class applied to the add link
    deleteCssClass: "delete-row", // CSS class applied to the delete link
    emptyCssClass: "empty-row", // CSS class applied to the empty row
    formCssClass: "dynamic-form", // CSS class applied to each form in a formset
    added: null, // Function called each time a new form is added
    removed: null, // Function called each time a form is deleted
    addButton: null // Existing add button to use
};
// Tabular inlines ---------------------------------------------------------
$.fn.tabularFormset = function(selector, options) {
    // Table-layout flavor of the inline formset: delegates to $.fn.formset
    // and re-initializes the admin widgets inside each newly added row.
    const $rows = $(this);
    const reinitDateTimeShortCuts = function() {
        // Reinitialize the calendar and clock widgets by force
        if (typeof DateTimeShortcuts !== "undefined") {
            $(".datetimeshortcuts").remove();
            DateTimeShortcuts.init();
        }
    };
    const updateSelectFilter = function() {
        // If any SelectFilter widgets are a part of the new form,
        // instantiate a new SelectFilter instance for it.
        if (typeof SelectFilter !== 'undefined') {
            $('.selectfilter').each(function(index, value) {
                SelectFilter.init(value.id, this.dataset.fieldName, false);
            });
            $('.selectfilterstacked').each(function(index, value) {
                SelectFilter.init(value.id, this.dataset.fieldName, true);
            });
        }
    };
    // Hook up prepopulated (slug-style) fields in a freshly added row to
    // the inputs they derive their value from.
    const initPrepopulatedFields = function(row) {
        row.find('.prepopulated_field').each(function() {
            const field = $(this),
                input = field.find('input, select, textarea'),
                dependency_list = input.data('dependency_list') || [],
                dependencies = [];
            $.each(dependency_list, function(i, field_name) {
                dependencies.push('#' + row.find('.field-' + field_name).find('input, select, textarea').attr('id'));
            });
            if (dependencies.length) {
                input.prepopulate(dependencies, input.attr('maxlength'));
            }
        });
    };
    $rows.formset({
        prefix: options.prefix,
        addText: options.addText,
        formCssClass: "dynamic-" + options.prefix,
        deleteCssClass: "inline-deletelink",
        deleteText: options.deleteText,
        emptyCssClass: "empty-form",
        added: function(row) {
            // New rows need their widgets wired up from scratch.
            initPrepopulatedFields(row);
            reinitDateTimeShortCuts();
            updateSelectFilter();
        },
        addButton: options.addButton
    });
    return $rows;
};
// Stacked inlines ---------------------------------------------------------
$.fn.stackedFormset = function(selector, options) {
    // jQuery plugin wiring a stacked inline group to the generic formset
    // plugin, keeping "#N" labels and widgets up to date on add/remove.
    const rows = $(this);

    // Renumber the "#N" suffix in every inline label.
    function renumberLabels(row) {
        $(selector).find(".inline_label").each(function(i) {
            const count = i + 1;
            $(this).html($(this).html().replace(/(#\d+)/g, "#" + count));
        });
    }

    // Reinitialize the calendar and clock widgets by force, yuck.
    function refreshDateTimeWidgets() {
        if (typeof DateTimeShortcuts !== "undefined") {
            $(".datetimeshortcuts").remove();
            DateTimeShortcuts.init();
        }
    }

    // If any SelectFilter widgets were added, instantiate a new instance.
    function refreshSelectFilters() {
        if (typeof SelectFilter !== "undefined") {
            $(".selectfilter").each(function(index, value) {
                SelectFilter.init(value.id, this.dataset.fieldName, false);
            });
            $(".selectfilterstacked").each(function(index, value) {
                SelectFilter.init(value.id, this.dataset.fieldName, true);
            });
        }
    }

    // Hook up prepopulated fields in a freshly added row.
    function wirePrepopulatedFields(row) {
        row.find('.prepopulated_field').each(function() {
            const input = $(this).find('input, select, textarea');
            const sourceFields = input.data('dependency_list') || [];
            const dependencyIds = [];
            $.each(sourceFields, function(i, field_name) {
                // Dependency in a fieldset.
                let field_element = row.find('.form-row .field-' + field_name);
                // Dependency without a fieldset.
                if (!field_element.length) {
                    field_element = row.find('.form-row.field-' + field_name);
                }
                dependencyIds.push('#' + field_element.find('input, select, textarea').attr('id'));
            });
            if (dependencyIds.length) {
                input.prepopulate(dependencyIds, input.attr('maxlength'));
            }
        });
    }

    rows.formset({
        prefix: options.prefix,
        addText: options.addText,
        formCssClass: "dynamic-" + options.prefix,
        deleteCssClass: "inline-deletelink",
        deleteText: options.deleteText,
        emptyCssClass: "empty-form",
        removed: renumberLabels,
        added: function(row) {
            wirePrepopulatedFields(row);
            refreshDateTimeWidgets();
            refreshSelectFilters();
            renumberLabels(row);
        },
        addButton: options.addButton
    });
    return rows;
};
$(document).ready(function() {
    // Bootstrap every inline formset group rendered by the admin,
    // dispatching on the inline type stored in its data attributes.
    $(".js-inline-admin-formset").each(function() {
        const data = $(this).data();
        const inlineOptions = data.inlineFormset;
        let selector;
        if (data.inlineType === "stacked") {
            selector = inlineOptions.name + "-group .inline-related";
            $(selector).stackedFormset(selector, inlineOptions.options);
        } else if (data.inlineType === "tabular") {
            selector = inlineOptions.name + "-group .tabular.inline-related tbody:first > tr.form-row";
            $(selector).tabularFormset(selector, inlineOptions.options);
        }
    });
});
}
|
javascript
|
github
|
https://github.com/django/django
|
django/contrib/admin/static/admin/js/inlines.js
|
import json
import Queue as queue
import socket
import select
import threading
import time
import sys
from processor import Session, Dispatcher
from utils import print_log, logger
# Poll event masks: read-only registration vs. read+write registration.
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
READ_WRITE = READ_ONLY | select.POLLOUT
# poll() timeout in milliseconds.
TIMEOUT = 100
# ssl is referenced by TcpSession (handshake error handling) below.
import ssl
class TcpSession(Session):
    """One client connection (plain TCP or SSL) served by TcpServer.

    Incoming bytes accumulate in self.message and are split into
    newline-terminated commands; outgoing JSON responses are queued in
    self.response_queue and flushed when the socket becomes writable.
    """

    def __init__(self, dispatcher, poller, connection, address, use_ssl, ssl_certfile, ssl_keyfile):
        Session.__init__(self, dispatcher)
        self.use_ssl = use_ssl
        self.poller = poller
        # Keep the raw socket: poller (un)registration must use the same
        # object even after the connection is wrapped in SSL.
        self.raw_connection = connection
        if use_ssl:
            import ssl
            # Non-blocking server-side TLS; the handshake is driven lazily
            # by check_do_handshake() from the poll loop.
            self._connection = ssl.wrap_socket(
                connection,
                server_side=True,
                certfile=ssl_certfile,
                keyfile=ssl_keyfile,
                ssl_version=ssl.PROTOCOL_SSLv23,
                do_handshake_on_connect=False)
        else:
            self._connection = connection
        self.address = address[0] + ":%d"%address[1]
        self.name = "TCP " if not use_ssl else "SSL "
        self.timeout = 1000
        self.dispatcher.add_session(self)
        self.response_queue = queue.Queue()
        # Buffer of received, not-yet-parsed bytes.
        self.message = ''
        # Tail of a response that could not be fully sent in one send().
        self.retry_msg = ''
        # Plain TCP needs no handshake; SSL handshakes lazily.
        self.handshake = not self.use_ssl

    def check_do_handshake(self):
        """Drive the non-blocking SSL handshake; no-op once completed."""
        if self.handshake:
            return
        try:
            self._connection.do_handshake()
        except ssl.SSLError as err:
            if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                # Wait for the next POLLIN event.
                return
            elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                # Handshake needs to write; also watch for writability.
                self.poller.modify(self.raw_connection, READ_WRITE)
                return
            else:
                raise
        # Handshake finished: fall back to read-only polling.
        self.poller.modify(self.raw_connection, READ_ONLY)
        self.handshake = True

    def connection(self):
        """Return the (possibly SSL-wrapped) socket; raise if stopped."""
        if self.stopped():
            raise Exception("Session was stopped")
        else:
            return self._connection

    def shutdown(self):
        """Best-effort shutdown and close of the underlying socket."""
        try:
            self._connection.shutdown(socket.SHUT_RDWR)
        except:
            # Peer may already be gone; closing below is what matters.
            # print_log("problem shutting down", self.address)
            pass
        self._connection.close()

    def send_response(self, response):
        """Serialize *response* to a JSON line, queue it, and request
        write polling so the event loop flushes it."""
        try:
            msg = json.dumps(response) + '\n'
        except BaseException as e:
            logger.error('send_response:' + str(e))
            return
        self.response_queue.put(msg)
        try:
            self.poller.modify(self.raw_connection, READ_WRITE)
        except BaseException as e:
            logger.error('send_response:' + str(e))
            return

    def parse_message(self):
        """Pop the next newline-terminated command from the buffer.

        Returns the stripped command string, or False when no complete
        line is buffered yet.
        """
        message = self.message
        self.time = time.time()
        raw_buffer = message.find('\n')
        if raw_buffer == -1:
            return False
        raw_command = message[0:raw_buffer].strip()
        self.message = message[raw_buffer + 1:]
        return raw_command
class TcpServer(threading.Thread):
    """Threaded TCP/SSL listener built on select.poll().

    Accepts connections into TcpSession objects, feeds complete JSON
    lines to the request dispatcher, and flushes queued responses when
    sockets become writable.
    """

    def __init__(self, dispatcher, host, port, use_ssl, ssl_certfile, ssl_keyfile):
        self.shared = dispatcher.shared
        self.dispatcher = dispatcher.request_dispatcher
        threading.Thread.__init__(self)
        self.daemon = True
        self.host = host
        self.port = port
        self.lock = threading.Lock()
        self.use_ssl = use_ssl
        self.ssl_keyfile = ssl_keyfile
        self.ssl_certfile = ssl_certfile
        # Maps poll file descriptor -> TcpSession.
        self.fd_to_session = {}
        self.buffer_size = 4096

    def handle_command(self, raw_command, session):
        """Parse one JSON command line and push it to the dispatcher.

        Sends an error response to the client on malformed JSON or a
        request missing the 'id'/'method' fields.
        """
        try:
            command = json.loads(raw_command)
        except:
            session.send_response({"error": "bad JSON"})
            return True
        try:
            # Try to load vital fields, and return an error if
            # unsuccessful.
            message_id = command['id']
            method = command['method']
        except KeyError:
            # Return an error JSON in response.
            session.send_response({"error": "syntax error", "request": raw_command})
        else:
            self.dispatcher.push_request(session, command)
        ## sleep a bit to prevent a single session from DOSing the queue
        #time.sleep(0.01)

    def run(self):
        """Bind the listening socket and run the poll() event loop."""
        # Try each address family returned for host/port until one binds.
        for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM):
            af, socktype, proto, cannonname, sa = res
            try:
                sock = socket.socket(af, socktype, proto)
                sock.setblocking(0)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            except socket.error:
                sock = None
                continue
            try:
                sock.bind(sa)
                sock.listen(5)
            except socket.error:
                sock.close()
                sock = None
                continue
            break
        host = sa[0]
        if af == socket.AF_INET6:
            host = "[%s]" % host
        if sock is None:
            print_log( "could not open " + ("SSL" if self.use_ssl else "TCP") + " socket on %s:%d" % (host, self.port))
            return
        print_log( ("SSL" if self.use_ssl else "TCP") + " server started on %s:%d" % (host, self.port))
        sock_fd = sock.fileno()
        poller = select.poll()
        poller.register(sock)

        def stop_session(fd):
            # Tear down one session: unregister its fd, then stop it.
            try:
                # unregister before we close s
                poller.unregister(fd)
            except BaseException as e:
                logger.error('unregister error:' + str(e))
            session = self.fd_to_session.pop(fd)
            # this will close the socket
            session.stop()

        # Events whose read filled the whole buffer are replayed on the
        # next loop pass, since more data is likely still pending.
        redo = []
        while not self.shared.stopped():
            if self.shared.paused():
                # Server paused: drop every live session, then idle.
                sessions = self.fd_to_session.keys()
                if sessions:
                    logger.info("closing %d sessions"%len(sessions))
                    for fd in sessions:
                        stop_session(fd)
                time.sleep(1)
                continue
            if redo:
                events = redo
                redo = []
            else:
                events = poller.poll(TIMEOUT)
            for fd, flag in events:
                if fd != sock_fd:
                    # Event on an existing client socket: make sure the
                    # SSL handshake (if any) keeps progressing.
                    session = self.fd_to_session[fd]
                    s = session._connection
                    try:
                        session.check_do_handshake()
                    except:
                        stop_session(fd)
                        continue
                # handle inputs
                if flag & (select.POLLIN | select.POLLPRI):
                    if fd == sock_fd:
                        # New incoming connection on the listening socket.
                        connection, address = sock.accept()
                        try:
                            session = TcpSession(self.dispatcher, poller, connection, address,
                                use_ssl=self.use_ssl, ssl_certfile=self.ssl_certfile, ssl_keyfile=self.ssl_keyfile)
                        except BaseException as e:
                            logger.error("cannot start TCP session" + str(e) + ' ' + repr(address))
                            connection.close()
                            continue
                        connection = session._connection
                        connection.setblocking(0)
                        self.fd_to_session[ connection.fileno() ] = session
                        poller.register(connection, READ_ONLY)
                        try:
                            session.check_do_handshake()
                        except BaseException as e:
                            logger.error('handshake failure:' + str(e) + ' ' + repr(address))
                            stop_session(connection.fileno())
                        continue
                    try:
                        data = s.recv(self.buffer_size)
                    except ssl.SSLError as x:
                        if x.args[0] == ssl.SSL_ERROR_WANT_READ:
                            pass
                        else:
                            logger.error('SSL recv error:'+ repr(x))
                        continue
                    except socket.error as x:
                        # errno 104 (connection reset by peer) is expected
                        # churn and not worth logging.
                        if x.args[0] != 104:
                            logger.error('recv error: ' + repr(x))
                        stop_session(fd)
                        continue
                    if data:
                        if len(data) == self.buffer_size:
                            # Full buffer: replay this event next pass.
                            redo.append( (fd, flag) )
                        session.message += data
                        # Dispatch every complete line buffered so far.
                        while True:
                            cmd = session.parse_message()
                            if not cmd:
                                break
                            if cmd == 'quit':
                                data = False
                                break
                            self.handle_command(cmd, session)
                    if not data:
                        # Empty read (peer closed) or explicit 'quit'.
                        stop_session(fd)
                        continue
                elif flag & select.POLLHUP:
                    # NOTE(review): 'address' is the address of the most
                    # recently accepted connection, not this session's.
                    print_log('client hung up', address)
                    stop_session(fd)
                elif flag & select.POLLOUT:
                    # Socket is ready to send data, if there is any to send.
                    if session.retry_msg:
                        next_msg = session.retry_msg
                    else:
                        try:
                            next_msg = session.response_queue.get_nowait()
                        except queue.Empty:
                            # No messages waiting so stop checking for writability.
                            poller.modify(s, READ_ONLY)
                            continue
                    try:
                        sent = s.send(next_msg)
                    except socket.error as x:
                        # NOTE(review): message says "recv error" but this
                        # is the send path.
                        logger.error("recv error:" + str(x))
                        stop_session(fd)
                        continue
                    # Keep any unsent tail for the next POLLOUT event.
                    session.retry_msg = next_msg[sent:]
                elif flag & select.POLLERR:
                    print_log('handling exceptional condition for', session.address)
                    stop_session(fd)
                elif flag & select.POLLNVAL:
                    print_log('invalid request', session.address)
                    stop_session(fd)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/python
#-*- coding: utf-8 -*-
import unittest
import os
import sys
try:
import version
except:
from . import version
sys.path.append(os.path.join(os.getenv("PINGUINO_LIB"), "qtgui", "resources"))
try:
from qtgui.pinguino_core.pinguino import Pinguino, AllBoards
from qtgui.pinguino_core.pinguino_config import PinguinoConfig
from qtgui.pinguino_core.config import Config
except:
from .qtgui.pinguino_core.pinguino import Pinguino, AllBoards
from .qtgui.pinguino_core.pinguino_config import PinguinoConfig
from .qtgui.pinguino_core.config import Config
# Module-level fixture shared by all test classes below.
# NOTE(review): this rebinds the name "Pinguino" from the class to a
# singleton instance; everything after this line uses the instance.
Pinguino = Pinguino()
PinguinoConfig.set_environ_vars()
PinguinoConfig.check_user_files()
config = Config()
PinguinoConfig.update_pinguino_paths(config, Pinguino)
PinguinoConfig.update_pinguino_extra_options(config, Pinguino)
PinguinoConfig.update_user_libs(Pinguino)
########################################################################
class TestPreprocess(unittest.TestCase):
    """Checks for the Pinguino source preprocessor."""

    #----------------------------------------------------------------------
    def test_delete_comments(self):
        """Comment stripping removes // and /* */ comments, keeping layout."""
        cases = (
            ("//Pinguino Rules!", ""),
            ("/*Pinguino Rules!*/", ""),
            ("/*Pinguino //Rules!*/", ""),
            ("///*Pinguino Rules!*/", ""),
            ("\n".join([
                "#define LED1 0\n",
                "//#define LED2 1\n",
                "/*\n",
                "1\n",
                "2\n",
                "3\n",
                "*/\n",
                "#include <math.h>\n",
            ]),
            "\n".join([
                "#define LED1 0\n",
                "\n",
                "\n",
                "\n",
                "\n",
                "\n",
                "\n",
                "#include <math.h>\n",
            ]),
            )
        )
        for source, expected in cases:
            got = Pinguino.remove_comments(source)
            self.assertMultiLineEqual(got, expected,
                "Remove comments: Failure\ngot: '{}'\nexpected: '{}'".format(got, expected))

    #----------------------------------------------------------------------
    @classmethod
    def preprocess(cls, lib, libinstructions):
        """Build a test method asserting one pinguino -> c replacement."""
        def generated_test(self):
            got, d = Pinguino.replace_word(lib["pinguino"], libinstructions)
            self.assertEqual(got, lib["c"],
                "Preprocess: Failure\ngot: '{}'\nexpected: '{}'".format(got, lib["c"]))
        return generated_test
########################################################################
class TestBareMinumumCompilation(unittest.TestCase):
    """Compilation smoke tests: an empty sketch must build for every board."""

    #----------------------------------------------------------------------
    @classmethod
    def compilation(cls, board, icsp=False, compiler="gcc"):
        """Build a test method compiling a bare-minimum sketch for *board*."""
        code = "void setup(){} void loop(){}"
        def generated_test(self):
            Pinguino.set_board(board)
            if icsp:
                Pinguino.set_icsp()
            if compiler != "gcc":
                Pinguino.set_8bit_compiler(compiler)
            try:
                Pinguino.compile_string(code)
            except BaseException as msg:
                self.fail("Compilation: impossible compile for {}, {}-bit\n{}".format(board.name, board.arch, str(msg)))
            if not Pinguino.compiled():
                msg = Pinguino.get_errors()
                self.fail("Compilation: impossible compile for {}, {}-bit\n{}".format(board.name, board.arch, str(msg)))
        return generated_test
# Register one preprocessor test per 8-bit library instruction.
libs8 = Pinguino.get_regobject_libinstructions(8)
for lib in libs8:
    test_name = "test_preprocess_8_{}".format(lib["pinguino"].replace(".", "_"))
    setattr(TestPreprocess, test_name, TestPreprocess.preprocess(lib, libs8))

# Register one preprocessor test per 32-bit library instruction.
# Fixed: this previously fetched the 8-bit instruction set (copy-paste bug,
# argument was 8) and built attribute names containing "-" instead of "_",
# inconsistent with the 8-bit branch above.
libs32 = Pinguino.get_regobject_libinstructions(32)
for lib in libs32:
    test_name = "test_preprocess_32_{}".format(lib["pinguino"].replace(".", "_"))
    setattr(TestPreprocess, test_name, TestPreprocess.preprocess(lib, libs32))

# Register one bare-minimum compilation test per board (and, for 8-bit
# boards, per compiler, with and without ICSP).
for board in AllBoards:
    if board.arch == 8:
        for compiler in ["sdcc", "xc8"]:
            test_name = "test_compile_icsp_{}_{}".format(compiler, board.name.replace(" ", "_").replace(".", "_"))
            setattr(TestBareMinumumCompilation, test_name, TestBareMinumumCompilation.compilation(board, icsp=True, compiler=compiler))
            test_name = "test_compile_{}_{}_{}".format(compiler, board.connect, board.name.replace(" ", "_").replace(".", "_"))
            setattr(TestBareMinumumCompilation, test_name, TestBareMinumumCompilation.compilation(board, icsp=False, compiler=compiler))
    else:
        test_name = "test_compile_{}_{}".format(board.connect, board.name.replace(" ", "_").replace(".", "_"))
        setattr(TestBareMinumumCompilation, test_name, TestBareMinumumCompilation.compilation(board))

# Run the dynamically-built suites.
# unittest.main()
suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestBareMinumumCompilation))
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(TestPreprocess))
unittest.TextTestRunner().run(suite)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<p class="datetime">
<label {% if widget.attrs.id %}for="{{ widget.subwidgets.0.attrs.id }}"{% endif %}>{{ date_label }}</label> {% with widget=widget.subwidgets.0 %}{% include widget.template_name %}{% endwith %}<br>
<label {% if widget.attrs.id %}for="{{ widget.subwidgets.1.attrs.id }}"{% endif %}>{{ time_label }}</label> {% with widget=widget.subwidgets.1 %}{% include widget.template_name %}{% endwith %}
</p>
|
html
|
github
|
https://github.com/django/django
|
django/contrib/admin/templates/admin/widgets/split_datetime.html
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.6.1"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

if PY3:
    # Canonical aliases for types whose names changed in Python 3.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes

    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str

    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Probe by asking len() for a value just over 2**31: a 32-bit
        # Py_ssize_t overflows, a 64-bit one does not.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
try:
result = self._resolve()
except ImportError:
# See the nice big comment in MovedModule.__getattr__.
raise AttributeError("%s could not be imported " % self.name)
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
    """Lazy proxy for a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # The PY3 name defaults to the six-level alias itself.
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        # It turns out many Python frameworks like to traverse sys.modules and
        # try to load various attributes. This causes problems if this is a
        # platform-specific module on the wrong platform, like _winreg on
        # Unixes. Therefore, we silently pretend unimportable modules do not
        # have any attributes. See issues #51, #53, #56, and #63 for the full
        # tales of woe.
        #
        # First, if possible, avoid loading the module just to look at __file__,
        # __name__, or __path__.
        if (attr in ("__file__", "__name__", "__path__") and
            self.mod not in sys.modules):
            raise AttributeError(attr)
        try:
            _module = self._resolve()
        except ImportError:
            raise AttributeError(attr)
        value = getattr(_module, attr)
        # Cache on the instance so future lookups skip __getattr__.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy proxy for an attribute that moved between modules in Python 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # PY3 attribute name defaults to the PY2 name, then to *name*.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            # PY2 attribute name defaults to *name*.
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""

# Table of renamed builtins/stdlib members: (six name, PY2 location, PY3
# location, [PY2 attr, PY3 attr]).
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "xmlrpclib", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
# Install each descriptor on the class; lazy submodules also get a
# sys.modules entry so "import six.moves.<name>" works.
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        sys.modules[__name__ + ".moves." + attr.name] = attr
del attr

_MovedItems._moved_attributes = _moved_attributes

# The public six.moves module object.
moves = sys.modules[__name__ + ".moves"] = _MovedItems(__name__ + ".moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""

# Members of PY2 urlparse/urllib that live in PY3 urllib.parse.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

# Registered under both the underscore and dotted module names.
sys.modules[__name__ + ".moves.urllib_parse"] = sys.modules[__name__ + ".moves.urllib.parse"] = Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""

# Exceptions from PY2 urllib/urllib2 that live in PY3 urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

# Registered under both the underscore and dotted module names.
sys.modules[__name__ + ".moves.urllib_error"] = sys.modules[__name__ + ".moves.urllib.error"] = Module_six_moves_urllib_error(__name__ + ".moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""

# Members of PY2 urllib/urllib2 that live in PY3 urllib.request.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

# Registered under both the underscore and dotted module names.
sys.modules[__name__ + ".moves.urllib_request"] = sys.modules[__name__ + ".moves.urllib.request"] = Module_six_moves_urllib_request(__name__ + ".moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""

# Response helper classes moved from PY2 urllib to PY3 urllib.response.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

# Registered under both the underscore and dotted module names.
sys.modules[__name__ + ".moves.urllib_response"] = sys.modules[__name__ + ".moves.urllib.response"] = Module_six_moves_urllib_response(__name__ + ".moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""

# robotparser was renamed to urllib.robotparser in PY3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

# Registered under both the underscore and dotted module names.
sys.modules[__name__ + ".moves.urllib_robotparser"] = sys.modules[__name__ + ".moves.urllib.robotparser"] = Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    # Bind the five lazy submodules registered above as attributes.
    parse = sys.modules[__name__ + ".moves.urllib_parse"]
    error = sys.modules[__name__ + ".moves.urllib_error"]
    request = sys.modules[__name__ + ".moves.urllib_request"]
    response = sys.modules[__name__ + ".moves.urllib_response"]
    robotparser = sys.modules[__name__ + ".moves.urllib_robotparser"]

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

sys.modules[__name__ + ".moves.urllib"] = Module_six_moves_urllib(__name__ + ".moves.urllib")
def add_move(move):
    """Register *move* (a MovedAttribute or MovedModule) on six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # The item may be a class-level descriptor on _MovedItems, or a value
    # cached on the moves instance; try both before giving up.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
if PY3:
    # Attribute names for method/function internals and dict iteration,
    # under their Python 3 spellings.
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"

    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
    _iterlists = "lists"
else:
    # Python 2 spellings of the same attributes.
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"

    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
    _iterlists = "iterlists"


try:
    advance_iterator = next
except NameError:
    # Python < 2.6 has no builtin next().
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    # Python 3.0/3.1 removed callable(); emulate via the __call__ slot.
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    # Base class giving PY2 classes a PY2-style next() that delegates to
    # a PY3-style __next__().
    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


# Version-neutral accessors built from the attribute names above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)


def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    return iter(getattr(d, _iterkeys)(**kw))

def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    return iter(getattr(d, _itervalues)(**kw))

def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    return iter(getattr(d, _iteritems)(**kw))

def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    return iter(getattr(d, _iterlists)(**kw))
# Byte/text literal helpers that behave the same on Python 2 and 3.
if PY3:
    def b(s):
        # latin-1 round-trips code points 0-255 unchanged.
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
    # exec is a real builtin function on Python 3; fetch it via getattr so
    # this file still parses on Python 2, where exec is a statement.
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # The three-argument raise is a syntax error on Python 3, so the
    # Python 2 reraise must be defined inside an exec'd string.
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
# Use the builtin print function when available (Python 2.6+ via
# __builtin__, always on 3); otherwise provide a backport.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # Promote separators to unicode if any argument is unicode so
            # the final concatenation does not mix str and unicode.
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class built by *meta* so that subclassing the
    result applies *meta* identically on Python 2 and Python 3.
    """
    name = "NewBase"
    namespace = {}
    return meta(name, bases, namespace)
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    Rebuilds the decorated class through *metaclass*, copying its body
    while stripping __dict__/__weakref__ and any slot descriptors so the
    recreated class does not end up with duplicate slot members.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
### Additional customizations for Django ###
if PY3:
    _assertRaisesRegex = "assertRaisesRegex"
    _assertRegex = "assertRegex"
    memoryview = memoryview
else:
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
    # memoryview and buffer are not stricly equivalent, but should be fine for
    # django core usage (mainly BinaryField). However, Jython doesn't support
    # buffer (see http://bugs.jython.org/issue1521), so we have to be careful.
    if sys.platform.startswith('java'):
        memoryview = memoryview
    else:
        memoryview = buffer
def assertRaisesRegex(self, *args, **kwargs):
    # Dispatch to the unittest method name appropriate for this Python
    # version; intended to be called with a TestCase instance as `self`.
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)
add_move(MovedModule("_dummy_thread", "dummy_thread"))
add_move(MovedModule("_thread", "thread"))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
import os
import sys
import traceback
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.vault import VaultEditor
from ansible.cli import CLI
from ansible.utils.display import Display
class VaultCLI(CLI):
    """Command line interface for encrypting/decrypting Ansible vault files."""

    VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
    CIPHER = 'AES256'

    def __init__(self, args, display=None):
        # The password is resolved lazily in run().
        self.vault_pass = None
        super(VaultCLI, self).__init__(args, display)

    def parse(self):
        """Build the option parser, pick the action and validate args."""
        self.parser = CLI.base_parser(
            vault_opts=True,
            usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
            epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
        )
        self.set_action()
        # Every action shares the same usage shape, so build the string
        # generically instead of one branch per action.
        if self.action in self.VALID_ACTIONS:
            self.parser.set_usage("usage: %%prog %s [options] file_name" % self.action)
        self.options, self.args = self.parser.parse_args()
        self.display.verbosity = self.options.verbosity
        if len(self.args) != 1:
            raise AnsibleOptionsError("Vault requires a single filename as a parameter")

    def run(self):
        """Resolve the vault password and dispatch to the chosen action."""
        super(VaultCLI, self).run()
        if self.options.vault_password_file:
            # read vault_pass from a file
            self.vault_pass = CLI.read_vault_password_file(self.options.vault_password_file)
        else:
            self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)
        if not self.vault_pass:
            raise AnsibleOptionsError("A password is required to use Ansible's Vault")
        self.execute()

    def execute_create(self):
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        VaultEditor(cipher, self.vault_pass, self.args[0]).create_file()

    def execute_decrypt(self):
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for path in self.args:
            VaultEditor(cipher, self.vault_pass, path).decrypt_file()
        self.display.display("Decryption successful")

    def execute_edit(self):
        for path in self.args:
            VaultEditor(None, self.vault_pass, path).edit_file()

    def execute_view(self):
        for path in self.args:
            VaultEditor(None, self.vault_pass, path).view_file()

    def execute_encrypt(self):
        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for path in self.args:
            VaultEditor(cipher, self.vault_pass, path).encrypt_file()
        self.display.display("Encryption successful")

    def execute_rekey(self):
        # Prompt (with confirmation) for the new password, then rekey each
        # file using the current password.
        __, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
        for path in self.args:
            VaultEditor(None, self.vault_pass, path).rekey_file(new_password)
        self.display.display("Rekey successful")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
import taskflow.engines
from taskflow.patterns import linear_flow
from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder import rpc
from cinder import utils
from cinder.volume.flows import common
LOG = logging.getLogger(__name__)
ACTION = 'volume:create'
class ExtractSchedulerSpecTask(flow_utils.CinderTask):
    """Extracts a spec object from a partial and/or incomplete request spec.
    Reversion strategy: N/A
    """
    default_provides = set(['request_spec'])

    def __init__(self, db_api, **kwargs):
        super(ExtractSchedulerSpecTask, self).__init__(addons=[ACTION],
                                                       **kwargs)
        self.db_api = db_api

    def _populate_request_spec(self, context, volume_id, snapshot_id,
                               image_id):
        """Build a full request spec by loading the volume from the DB.

        Only used for RPC callers older than version 1.2, which did not
        send a request_spec themselves.
        """
        # Create the full request spec using the volume_id.
        #
        # NOTE(harlowja): this will fetch the volume from the database, if
        # the volume has been deleted before we got here then this should fail.
        #
        # In the future we might want to have a lock on the volume_id so that
        # the volume can not be deleted while its still being created?
        if not volume_id:
            msg = _("No volume_id provided to populate a request_spec from")
            raise exception.InvalidInput(reason=msg)
        volume_ref = self.db_api.volume_get(context, volume_id)
        volume_type_id = volume_ref.get('volume_type_id')
        vol_type = self.db_api.volume_type_get(context, volume_type_id)
        return {
            'volume_id': volume_id,
            'snapshot_id': snapshot_id,
            'image_id': image_id,
            'volume_properties': {
                # quiet=False: raise instead of silently defaulting when the
                # stored size is not a valid integer.
                'size': utils.as_int(volume_ref.get('size'), quiet=False),
                'availability_zone': volume_ref.get('availability_zone'),
                'volume_type_id': volume_type_id,
            },
            # NOTE: dict.iteritems() does not exist on Python 3; items()
            # (wrapped in list()) produces the same concrete list of
            # (key, value) pairs on both Python 2 and 3.
            'volume_type': list(dict(vol_type).items()),
        }

    def execute(self, context, request_spec, volume_id, snapshot_id,
                image_id):
        """Return the request spec, building one if the caller sent none."""
        # For RPC version < 1.2 backward compatibility
        if request_spec is None:
            request_spec = self._populate_request_spec(context, volume_id,
                                                       snapshot_id, image_id)
        return {
            'request_spec': request_spec,
        }
class ScheduleCreateVolumeTask(flow_utils.CinderTask):
    """Activates a scheduler driver and handles any subsequent failures.
    Notification strategy: on failure the scheduler rpc notifier will be
    activated and a notification will be emitted indicating what errored,
    the reason, and the request (and misc. other data) that caused the error
    to be triggered.
    Reversion strategy: N/A
    """
    FAILURE_TOPIC = "scheduler.create_volume"
    def __init__(self, db_api, driver_api, **kwargs):
        super(ScheduleCreateVolumeTask, self).__init__(addons=[ACTION],
                                                       **kwargs)
        self.db_api = db_api
        self.driver_api = driver_api
    def _handle_failure(self, context, request_spec, cause):
        # Always log the failure, even if sending the notification itself
        # raises.
        try:
            self._notify_failure(context, request_spec, cause)
        finally:
            LOG.error(_LE("Failed to run task %(name)s: %(cause)s") %
                      {'cause': cause, 'name': self.name})
    def _notify_failure(self, context, request_spec, cause):
        """When scheduling fails send out an event that it failed."""
        payload = {
            'request_spec': request_spec,
            'volume_properties': request_spec.get('volume_properties', {}),
            'volume_id': request_spec['volume_id'],
            'state': 'error',
            'method': 'create_volume',
            'reason': cause,
        }
        try:
            rpc.get_notifier('scheduler').error(context, self.FAILURE_TOPIC,
                                                payload)
        except exception.CinderException:
            # A notification failure must not mask the original error.
            LOG.exception(_LE("Failed notifying on %(topic)s "
                              "payload %(payload)s") %
                          {'topic': self.FAILURE_TOPIC, 'payload': payload})
    def execute(self, context, request_spec, filter_properties):
        # Hand the request to the scheduler driver; on failure, notify and
        # mark the volume as errored.
        try:
            self.driver_api.schedule_create_volume(context, request_spec,
                                                   filter_properties)
        except exception.NoValidHost as e:
            # No host found happened, notify on the scheduler queue and log
            # that this happened and set the volume to errored out and
            # *do not* reraise the error (since whats the point).
            try:
                self._handle_failure(context, request_spec, e)
            finally:
                common.error_out_volume(context, self.db_api,
                                        request_spec['volume_id'], reason=e)
        except Exception as e:
            # Some other error happened, notify on the scheduler queue and log
            # that this happened and set the volume to errored out and
            # *do* reraise the error.
            with excutils.save_and_reraise_exception():
                try:
                    self._handle_failure(context, request_spec, e)
                finally:
                    common.error_out_volume(context, self.db_api,
                                            request_spec['volume_id'],
                                            reason=e)
def get_flow(context, db_api, driver_api, request_spec=None,
             filter_properties=None,
             volume_id=None, snapshot_id=None, image_id=None):
    """Constructs and returns the scheduler entrypoint flow.

    The returned (loaded but not yet running) engine wires together:

    1. Injection of the initial keys & values the tasks depend on.
    2. Extraction/cleanup of a scheduler spec from the provided inputs.
    3. The scheduler driver task, which selects a host and also handles
       (and notifies about) any scheduling failure.
    """
    flow_name = ACTION.replace(":", "_") + "_scheduler"
    scheduler_flow = linear_flow.Flow(flow_name)
    # Extract and clean the spec from the starting values.
    scheduler_flow.add(ExtractSchedulerSpecTask(
        db_api,
        rebind={'request_spec': 'raw_request_spec'}))
    # Activate the desired scheduler driver (and deal with its failures).
    scheduler_flow.add(ScheduleCreateVolumeTask(db_api, driver_api))
    # Initial data made available to the tasks above.
    store = {
        'context': context,
        'raw_request_spec': request_spec,
        'filter_properties': filter_properties,
        'volume_id': volume_id,
        'snapshot_id': snapshot_id,
        'image_id': image_id,
    }
    # Load (but do not run) the flow using the provided initial data.
    return taskflow.engines.load(scheduler_flow, store=store)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, BoolProperty, IntProperty, StringProperty
from mathutils import Vector, Matrix
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level, describe_data_shape, describe_data_structure
from sverchok.utils.logging import info, exception
from sverchok.utils.field.scalar import SvScalarField
from sverchok.utils.surface import SvSurface
from sverchok.utils.marching_squares import make_contours
from sverchok.utils.sv_mesh_utils import mesh_join
from sverchok.utils.dummy_nodes import add_dummy
from sverchok.dependencies import skimage
if skimage is None:
add_dummy('SvExMSquaresOnSurfaceNode', "Marching Squares on Surface", 'skimage')
else:
from skimage import measure
class SvExMSquaresOnSurfaceNode(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Marching Squares on Surface
    Tooltip: Marching Squares on Surface
    """
    bl_idname = 'SvExMSquaresOnSurfaceNode'
    bl_label = 'Marching Squares on Surface'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_EX_MSQUARES'
    # Iso level at which contours of the scalar field are extracted.
    iso_value : FloatProperty(
        name = "Value",
        default = 1.0,
        update = updateNode)
    # Sampling resolution along the surface's U parameter.
    samples_u : IntProperty(
        name = "Samples U",
        default = 50,
        min = 4,
        update = updateNode)
    # Sampling resolution along the surface's V parameter.
    samples_v : IntProperty(
        name = "Samples V",
        default = 50,
        min = 4,
        update = updateNode)
    connect_bounds : BoolProperty(
        name = "Connect boundary",
        default = True,
        update = updateNode)
    join : BoolProperty(
        name = "Join by Surface",
        description = "Output single list of vertices / edges for all input surfaces",
        default = True,
        update = updateNode)
    def sv_init(self, context):
        # Declare input/output sockets; Value/SamplesU/SamplesV fall back to
        # the node properties above when unconnected.
        self.inputs.new('SvScalarFieldSocket', "Field")
        self.inputs.new('SvSurfaceSocket', "Surface")
        self.inputs.new('SvStringsSocket', "Value").prop_name = 'iso_value'
        self.inputs.new('SvStringsSocket', "SamplesU").prop_name = 'samples_u'
        self.inputs.new('SvStringsSocket', "SamplesV").prop_name = 'samples_v'
        self.outputs.new('SvVerticesSocket', "Vertices")
        self.outputs.new('SvStringsSocket', "Edges")
        self.outputs.new('SvVerticesSocket', "UVVertices")
    def draw_buttons(self, context, layout):
        layout.prop(self, 'join', toggle=True)
        layout.prop(self, 'connect_bounds', toggle=True)
    def process(self):
        # Nothing to do unless someone consumes one of the outputs.
        if not any(socket.is_linked for socket in self.outputs):
            return
        fields_s = self.inputs['Field'].sv_get()
        surface_s = self.inputs['Surface'].sv_get()
        samples_u_s = self.inputs['SamplesU'].sv_get()
        samples_v_s = self.inputs['SamplesV'].sv_get()
        value_s = self.inputs['Value'].sv_get()
        # Normalize everything to nesting level 2 so the two zip loops
        # below can treat all inputs uniformly.
        fields_s = ensure_nesting_level(fields_s, 2, data_types=(SvScalarField,))
        surface_s = ensure_nesting_level(surface_s, 2, data_types=(SvSurface,))
        samples_u_s = ensure_nesting_level(samples_u_s, 2)
        samples_v_s = ensure_nesting_level(samples_v_s, 2)
        value_s = ensure_nesting_level(value_s, 2)
        verts_out = []
        edges_out = []
        uv_verts_out = []
        for field_i, surface_i, samples_u_i, samples_v_i, value_i in zip_long_repeat(fields_s, surface_s, samples_u_s, samples_v_s, value_s):
            for field, surface, samples_u, samples_v, value in zip_long_repeat(field_i, surface_i, samples_u_i, samples_v_i, value_i):
                surface_verts = []
                surface_uv = []
                surface_edges = []
                # Sample the surface on a regular samples_u x samples_v grid
                # in UV space and evaluate the scalar field at the resulting
                # 3D points.
                u_min, u_max = surface.get_u_min(), surface.get_u_max()
                v_min, v_max = surface.get_v_min(), surface.get_v_max()
                u_range = np.linspace(u_min, u_max, num=samples_u)
                v_range = np.linspace(v_min, v_max, num=samples_v)
                us, vs = np.meshgrid(u_range, v_range, indexing='ij')
                us, vs = us.flatten(), vs.flatten()
                surface_points = surface.evaluate_array(us, vs)
                xs = surface_points[:,0]
                ys = surface_points[:,1]
                zs = surface_points[:,2]
                field_values = field.evaluate_grid(xs, ys, zs)
                field_values = field_values.reshape((samples_u, samples_v))
                # Marching squares runs in UV (grid) space.
                contours = measure.find_contours(field_values, level=value)
                u_size = (u_max - u_min)/samples_u
                v_size = (v_max - v_min)/samples_v
                uv_contours, new_edges, _ = make_contours(samples_u, samples_v, u_min, u_size, v_min, v_size, 0, contours, make_faces=True, connect_bounds = self.connect_bounds)
                if uv_contours:
                    for uv_points in uv_contours:
                        us = np.array([p[0] for p in uv_points])
                        vs = np.array([p[1] for p in uv_points])
                        # Map each UV contour back onto the surface in 3D.
                        new_verts = surface.evaluate_array(us, vs).tolist()
                        surface_uv.append(uv_points)
                        surface_verts.append(new_verts)
                    surface_edges.extend(new_edges)
                    if self.join:
                        # NOTE(review): the faces argument is sized by
                        # len(surface_edges), not len(surface_verts) —
                        # presumably these match here; verify against
                        # mesh_join's expectations.
                        surface_verts, surface_edges, _ = mesh_join(surface_verts, surface_edges, [[]]*len(surface_edges))
                        surface_uv = sum(surface_uv, [])
                    verts_out.append(surface_verts)
                    uv_verts_out.append(surface_uv)
                    edges_out.append(surface_edges)
                else:
                    verts_out.append([])
                    uv_verts_out.append([])
                    edges_out.append([])
        self.outputs['Vertices'].sv_set(verts_out)
        self.outputs['Edges'].sv_set(edges_out)
        # Older saved node trees may lack this socket; guard before writing.
        if 'UVVertices' in self.outputs:
            self.outputs['UVVertices'].sv_set(uv_verts_out)
def register():
    # The node is only available when the optional skimage dependency is
    # installed; otherwise a dummy placeholder was registered instead.
    if skimage is None:
        return
    bpy.utils.register_class(SvExMSquaresOnSurfaceNode)
def unregister():
    if skimage is None:
        return
    bpy.utils.unregister_class(SvExMSquaresOnSurfaceNode)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
CONFIG_NAME = ".clang_complete"
def readConfiguration():
    """Read CONFIG_NAME and return its non-blank lines, stripped.

    Returns an empty list when the file cannot be read (e.g. it does
    not exist yet).
    """
    try:
        # Context manager guarantees the file is closed even if reading
        # raises (the original leaked the handle on a read error).
        with open(CONFIG_NAME, "r") as f:
            stripped = (line.strip() for line in f)
            return [line for line in stripped if line]
    except IOError:
        return []
def writeConfiguration(lines):
    """Overwrite CONFIG_NAME with *lines* (each must already end in a newline)."""
    # Context manager flushes and closes even on write errors.
    with open(CONFIG_NAME, "w") as f:
        f.writelines(lines)
def parseArguments(arguments):
    """Extract include dirs, macro defines, forced includes and selected
    warning/standard options from a compiler command line.

    Returns them normalized as flag strings: "-I<dir>", "-D<macro>",
    "-include <file>" and the options verbatim, in that group order.
    """
    includes = []
    defines = []
    include_files = []
    options = []
    # When a flag takes its value as a separate argument, `pending` points
    # at the list the next argument should be appended to.
    pending = None
    for arg in arguments:
        if pending is not None:
            pending.append(arg)
            pending = None
        elif arg == "-I":
            pending = includes
        elif arg == "-D":
            pending = defines
        elif arg == "-include":
            pending = include_files
        elif arg.startswith("-I"):
            includes.append(arg[2:])
        elif arg.startswith("-D"):
            defines.append(arg[2:])
        elif (arg.startswith('-std=') or arg == '-ansi'
                or arg.startswith('-pedantic') or arg.startswith('-W')):
            options.append(arg)
    result = ["-I" + path for path in includes]
    result += ["-D" + macro for macro in defines]
    result += ["-include " + path for path in include_files]
    result += options
    return result
def mergeLists(base, new):
    """Return *base* plus the entries of *new* not already present.

    Order is preserved; duplicates already inside *base* are kept as-is.
    """
    merged = list(base)
    # list.extend consumes the generator one item at a time, appending as it
    # goes, so the membership test also sees entries added earlier from
    # *new* — duplicates within *new* are therefore filtered too.
    merged.extend(entry for entry in new if entry not in merged)
    return merged
# Merge the flags from this compiler invocation into .clang_complete, then
# run the real compiler command unchanged and propagate its exit status.
configuration = readConfiguration()
args = parseArguments(sys.argv)
result = mergeLists(configuration, args)
writeConfiguration(map(lambda x: x + "\n", result))
import subprocess
# sys.argv[1:] is the wrapped compiler command line.
proc = subprocess.Popen(sys.argv[1:])
ret = proc.wait()
# NOTE(review): Popen.wait() never returns None (that is poll()); this
# guard appears to be dead code kept for safety.
if ret is None:
    sys.exit(1)
sys.exit(ret)
# vim: set ts=2 sts=2 sw=2 expandtab :
|
unknown
|
codeparrot/codeparrot-clean
| ||
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from xml.dom import minidom
from django.test import TestCase
from django.template import Template, Context
from django.conf import settings
from django.utils.unittest import skipIf
from billing import get_integration
@skipIf(not settings.MERCHANT_SETTINGS.get("world_pay", None), "WorldPay integration not configured")
class WorldPayTestCase(TestCase):
    """Tests rendering of the WorldPay payment-form integration."""
    def setUp(self):
        # Fresh integration populated with a known set of form fields for
        # every test.
        self.wp = get_integration("world_pay")
        fields = {
            "cartId": "TEST123",
            "amount": "1",
            "currency": "USD",
            "testMode": "100",
            "futurePayType": "regular",
            "option": "0",
            "noOfPayments": "12",
            "intervalUnit": "3",
            "intervalMult": "1",
            "normalAmount": "1",
            "startDelayUnit": "3",
            "startDelayMult": "1",
            "instId": "12345",
            "signatureFields": "instId:amount:cartId",
        }
        self.wp.add_fields(fields)
    def assertFormIsCorrect(self, form, fields):
        """Assert that every hidden input rendered in *form* appears in
        *fields* and that the form posts to the WorldPay test endpoint."""
        dom = minidom.parseString(form)
        inputs = dom.getElementsByTagName('input')
        values_dict = {}
        for el in inputs:
            if el.attributes['type'].value == 'hidden' and el.hasAttribute('value'):
                values_dict[el.attributes['name'].value] = el.attributes['value'].value
        # assertDictContainsSubset(subset, actual): checks `fields` is a
        # superset of the rendered hidden inputs.
        self.assertDictContainsSubset(values_dict, fields)
        form_action_url = dom.getElementsByTagName('form')[0].attributes['action'].value
        parsed = urlparse(form_action_url)
        self.assertEquals(parsed.scheme, 'https')
        self.assertEquals(parsed.netloc, 'select-test.worldpay.com')
        self.assertEquals(parsed.path, '/wcc/purchase')
    def testFormGen(self):
        # Since the secret key cannot be distributed
        settings.WORLDPAY_MD5_SECRET_KEY = "test"
        tmpl = Template("{% load render_integration from billing_tags %}{% render_integration obj %}")
        form = tmpl.render(Context({"obj": self.wp}))
        self.assertFormIsCorrect(form, self.wp.fields)
    def testFormGen2(self):
        # Since the secret key cannot be distributed
        settings.WORLDPAY_MD5_SECRET_KEY = "test"
        # Use a different signature field list and drop any previously
        # computed signature so it gets regenerated on render.
        self.wp.add_field("signatureFields", "instId:amount:currency:cartId")
        self.wp.fields.pop("signature", None)
        tmpl = Template("{% load render_integration from billing_tags %}{% render_integration obj %}")
        form = tmpl.render(Context({"obj": self.wp}))
        self.assertFormIsCorrect(form, self.wp.fields)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from .. import dataformat
from struct import Struct, unpack_from
from .. import util
from ..util import dbg, zstr
class UnitCommand(dataformat.Exportable):
    name_struct = "unit_command"
    name_struct_file = "unit"
    struct_description = "a command a single unit may recieve by script or human."
    # One entry of the unit command table: tuples of
    # (read/export mode, member name, storage type), in on-disk order.
    data_format = (
        (dataformat.READ, "command_used", "int16_t"), #always 1
        (dataformat.READ_EXPORT, "id", "int16_t"), #command id
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "type", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "command_ability",
            lookup_dict = {
                0: "UNUSED",
                1: "MOVE_TO",
                2: "FOLLOW",
                3: "GARRISON",
                4: "EXPLORE",
                5: "GATHER", # gather, rebuild
                6: "UNKNOWN_ANIMAL",
                7: "ATTACK",
                8: "SHOOT",
                10: "FLY",
                11: "UNKNOWN_PREDATOR",
                12: "UNLOAD", # transport, garrison
                13: "GUARD",
                20: "ESCAPE", # sure?
                21: "UNKNOWN_FARM",
                101: "BUILD",
                102: "MAKE_OBJECT",
                103: "MAKE_TECH",
                104: "CONVERT",
                105: "HEAL",
                106: "REPAIR",
                107: "CONVERT_AUTO",
                109: "UNKNOWN_109",
                110: "HUNT",
                111: "TRADE",
                120: "WONDER_VICTORY_GENERATE",
                121: "DESELECT_ON_TASK",
                122: "LOOT",
                123: "HOUSING",
                125: "UNPACK_ATTACK",
                131: "UNKNOWN_131",
                132: "PICKUP_UNIT",
                135: "KIDNAP_UNIT",
                136: "DEPOSIT_UNIT",
                768: "UNKNOWN_768",
                1024: "UNKNOWN_1024",
            },
        )),
        (dataformat.READ_EXPORT, "class_id", "int16_t"),
        (dataformat.READ_EXPORT, "unit_id", "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "ressource_in", "int16_t"),
        (dataformat.READ_EXPORT, "ressource_productivity", "int16_t"), #resource that multiplies the amount you can gather
        (dataformat.READ_EXPORT, "ressource_out", "int16_t"),
        (dataformat.READ_EXPORT, "ressource", "int16_t"),
        (dataformat.READ_EXPORT, "work_rate_multiplier", "float"),
        (dataformat.READ_EXPORT, "execution_radius", "float"),
        (dataformat.READ_EXPORT, "extra_range", "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "float"),
        (dataformat.READ, "selection_enabled", "int8_t"), #1=allows to select a target, type defined in `selection_type`
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "targets_allowed", dataformat.EnumLookupMember(
            raw_type = "int8_t", #what can be selected as a target for the unit command?
            type_name = "selection_type",
            lookup_dict = {
                0: "ANY_0", #select anything
                1: "OWNED_UNITS", #your own things
                2: "NEUTRAL_ENEMY", #enemy and neutral things (->attack)
                3: "NOTHING",
                4: "GAIA_OWNED_ALLY", #any of gaia, owned or allied things
                5: "GAYA_NEUTRAL_ENEMY", #any of gaia, neutral or enemy things
                6: "NOT_OWNED", #all things that aren't yours
                7: "ANY_7",
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "tool_graphic_id", "int16_t"), #walking with tool but no ressource
        (dataformat.READ, "proceed_graphic_id", "int16_t"), #proceeding ressource gathering or attack
        (dataformat.READ, "action_graphic_id", "int16_t"), #actual execution or transformation graphic
        (dataformat.READ, "carrying_graphic_id", "int16_t"), #display ressources in hands
        (dataformat.READ, "execution_sound_id", "int16_t"), #sound to play when execution starts
        (dataformat.READ, "ressource_deposit_sound_id", "int16_t"), #sound to play on ressource drop
    )
class UnitHeader(dataformat.Exportable):
    name_struct = "unit_header"
    name_struct_file = "unit"
    struct_description = "stores a bunch of unit commands."
    data_format = (
        # ContinueReadMember: a falsy `exists` aborts reading the rest.
        (dataformat.READ, "exists", dataformat.ContinueReadMember("uint8_t")),
        (dataformat.READ, "unit_command_count", "uint16_t"),
        # Variable-length array of commands, sized by unit_command_count.
        (dataformat.READ_EXPORT, "unit_commands", dataformat.SubdataMember(
            ref_type=UnitCommand,
            length="unit_command_count",
        )),
    )
class RessourceStorage(dataformat.Exportable):
    name_struct = "ressource_storage"
    name_struct_file = "unit"
    struct_description = "determines the resource storage capacity for one unit mode."
    data_format = (
        (dataformat.READ, "type", "int16_t"),
        (dataformat.READ, "amount", "float"),
        # How the stored amount behaves over time and on unit death.
        (dataformat.READ, "used_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "ressource_handling",
            lookup_dict = {
                0: "DECAYABLE",
                1: "KEEP_AFTER_DEATH",
                2: "RESET_ON_DEATH_INSTANT",
                4: "RESET_ON_DEATH_WHEN_COMPLETED",
            },
        )),
    )
class DamageGraphic(dataformat.Exportable):
    name_struct = "damage_graphic"
    name_struct_file = "unit"
    struct_description = "stores one possible unit image that is displayed at a given damage percentage."
    data_format = (
        (dataformat.READ_EXPORT, "graphic_id", "int16_t"),
        (dataformat.READ_EXPORT, "damage_percent", "int8_t"),
        # Whether the graphic is overlaid on, or replaces, the base image.
        (dataformat.READ_EXPORT, "apply_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "damage_draw_type",
            lookup_dict = {
                0: "ADD_FLAMES_0",
                1: "ADD_FLAMES_1",
                2: "REPLACE",
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
    )
class HitType(dataformat.Exportable):
    name_struct = "hit_type"
    name_struct_file = "unit"
    struct_description = "stores attack amount for a damage type."
    data_format = (
        # Armor/attack class this entry applies to.
        (dataformat.READ, "type_id", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "hit_class",
            lookup_dict = {
                -1: "NONE",
                1: "INFANTRY",
                2: "SHIP_TURTLE",
                3: "UNITS_PIERCE",
                4: "UNITS_MELEE",
                5: "WAR_ELEPHANT",
                8: "CAVALRY",
                11: "BUILDINGS_NO_PORT",
                13: "STONE_DEFENSES",
                15: "ARCHERS",
                16: "SHIPS_CAMELS_SABOTEURS",
                17: "RAMS",
                18: "TREES",
                19: "UNIQUE_UNITS",
                20: "SIEGE_WEAPONS",
                21: "BUILDINGS",
                22: "WALLS_GATES",
                24: "BOAR",
                25: "MONKS",
                26: "CASTLE",
                27: "SPEARMEN",
                28: "CAVALRY_ARCHER",
                29: "EAGLE_WARRIOR",
            },
        )),
        (dataformat.READ, "amount", "int16_t"),
    )
class RessourceCost(dataformat.Exportable):
    """One resource cost entry for creating a unit.

    The type_id enum doubles as the game's global per-player attribute
    table, which is why it also contains score/statistics counters.
    """

    name_struct = "ressource_cost"
    name_struct_file = "unit"
    struct_description = "stores cost for one ressource for creating the unit."

    data_format = (
        (dataformat.READ, "type_id", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "ressource_types",
            lookup_dict = {
                -1: "NONE",
                0: "FOOD_STORAGE",
                1: "WOOD_STORAGE",
                2: "STONE_STORAGE",
                3: "GOLD_STORAGE",
                4: "POPULATION_HEADROOM",
                5: "CONVERSION_RANGE",
                6: "CURRENT_AGE",
                7: "OWNED_RELIC_COUNT",
                8: "TRADE_BONUS",
                9: "TRADE_GOODS",
                10: "TRADE_PRODUCTION",
                11: "POPULATION", #both current population and population headroom
                12: "CORPSE_DECAY_TIME",
                13: "DISCOVERY",
                14: "RUIN_MONUMENTS_CAPTURED", #unused
                15: "PREDATOR_ANIMAL_FOOD",
                16: "CROPS",
                17: "FISH_STORAGE",
                18: "UNKNOWN_18",
                19: "TOTAL_UNITS_OWNED", #or just military ones? used for counting losses
                20: "UNITS_KILLED",
                21: "RESEARCHED_TECHNOLOGIES_COUNT",
                23: "TECHNOLOGY_ID_0", #default: 102
                24: "TECHNOLOGY_ID_1", #default: 103
                25: "TECHNOLOGY_ID_2", #default: 101
                27: "ATONEMENT", #bool
                28: "REDEMPTION", #bool
                30: "VAL_500", #default: 500
                32: "BONUS_POPULATION",
                35: "FAITH_RECHARGE_RATE", #default: 1.6
                36: "FARM_FOOD_AMOUNT", #default: 175
                37: "CIVILIAN_POPULATION",
                38: "UNKNOWN_38",
                39: "ALL_TECHS_ACHIEVED", #default: 178
                40: "MILITARY_POPULATION", #-> largest army
                41: "UNITS_CONVERTED",
                42: "WONDERS_STANDING",
                43: "BUILDINGS_RAZED",
                44: "KILL_RATIO",
                45: "SURVIVAL_TO_FINISH", #bool
                46: "TRIBUTE_FEE", #default: 0.3
                47: "GOLD_MINING_PRODUCTIVITY", #default: 1
                48: "TOWN_CENTER_AVAILABLE",
                49: "GOLD_COUNTER",
                50: "REVEAL_ALLY", #bool, ==cartography discovered
                51: "HOUSES_UNUSED",
                52: "MONASTERY_COUNT",
                53: "TRIBUTE_SENT",
                54: "RUINES_CAPTURED_ALL", #bool
                55: "RELICS_CAPTURED_ALL", #bool
                56: "LOAD_STORAGE", #or unit unload room?
                57: "CAPTURED_UNITS",
                58: "DARK_AGE", #default: 104
                59: "TRADE_GOOD_QUALITY", #default: 1
                60: "TRADE_MARKET_LEVEL",
                61: "FORMATIONS",
                62: "BUILDING_HOUSING_RATE", #default: 20
                63: "GATHER_TAX_RATE", #default: 32000
                64: "GATHER_ACCUMULATOR",
                65: "SALVAGE_DECAY_RATE", #default: 5
                66: "ALLOW_FORMATION", #bool, something with age?
                67: "CONVERSIONS", #bool?
                68: "HIT_POINTS_KILLED", #unused
                69: "KILLED_PLAYER_1", #bool
                70: "KILLED_PLAYER_2", #bool
                71: "KILLED_PLAYER_3", #bool
                72: "KILLED_PLAYER_4", #bool
                73: "KILLED_PLAYER_5", #bool
                74: "KILLED_PLAYER_6", #bool
                75: "KILLED_PLAYER_7", #bool
                76: "KILLED_PLAYER_8", #bool
                77: "CONVERSION_RESISTANCE",
                78: "TRADE_FEE", #default: 0.3
                79: "STONE_MINING_PRODUCTIVITY", #default: 1
                80: "QUEUED_UNITS",
                81: "TRAINING_COUNT",
                82: "START_PACKED_TOWNCENTER", #or raider, default: 2
                83: "BOARDING_RECHARGE_RATE",
                84: "STARTING_VILLAGERS", #default: 3
                85: "RESEARCH_COST_MULTIPLIER",
                86: "RESEARCH_TIME_MULTIPLIER",
                87: "CONVERT_SHIPS_ABILITY", #bool
                88: "FISH_TRAP_FOOD_AMOUNT", #default: 700
                89: "BONUS_HEALING_RATE",
                90: "HEALING_RANGE",
                91: "BONUS_STARTING_FOOD",
                92: "BONUS_STARTING_WOOD",
                93: "BONUS_STARTING_STONE",
                94: "BONUS_STARTING_GOLD",
                95: "TOWN_CENTER_PACKING", #or raider, default: 3
                96: "SELF_HEALING_SECONDS_BERSERKER",
                97: "ANIMAL_DISCOVERY_DOMINANT_LOS", #bool, sheep/turkey
                98: "SCORE_ECONOMY", #object cost summary
                99: "SCORE_TECHNOLOGY",
                100: "RELIC_GOLD_COLLECTED",
                101: "TRADE_PROFIT",
                102: "TRIBUTE_P1",
                103: "TRIBUTE_P2",
                104: "TRIBUTE_P3",
                105: "TRIBUTE_P4",
                106: "TRIBUTE_P5",
                107: "TRIBUTE_P6",
                108: "TRIBUTE_P7",
                109: "TRIBUTE_P8",
                110: "KILL_SCORE_P1",
                111: "KILL_SCORE_P2",
                112: "KILL_SCORE_P3",
                113: "KILL_SCORE_P4",
                114: "KILL_SCORE_P5",
                115: "KILL_SCORE_P6",
                116: "KILL_SCORE_P7",
                117: "KILL_SCORE_P8",
                118: "RAZING_COUNT_P1",
                119: "RAZING_COUNT_P2",
                120: "RAZING_COUNT_P3",
                121: "RAZING_COUNT_P4",
                122: "RAZING_COUNT_P5",
                123: "RAZING_COUNT_P6",
                124: "RAZING_COUNT_P7",
                125: "RAZING_COUNT_P8",
                126: "RAZING_SCORE_P1",
                127: "RAZING_SCORE_P2",
                128: "RAZING_SCORE_P3",
                129: "RAZING_SCORE_P4",
                130: "RAZING_SCORE_P5",
                131: "RAZING_SCORE_P6",
                132: "RAZING_SCORE_P7",
                133: "RAZING_SCORE_P8",
                134: "STANDING_CASTLES",
                135: "RAZINGS_HIT_POINTS",
                136: "KILLS_BY_P1",
                137: "KILLS_BY_P2",
                138: "KILLS_BY_P3",
                139: "KILLS_BY_P4",
                140: "KILLS_BY_P5",
                141: "KILLS_BY_P6",
                142: "KILLS_BY_P7",
                143: "KILLS_BY_P8",
                144: "RAZINGS_BY_P1",
                145: "RAZINGS_BY_P2",
                146: "RAZINGS_BY_P3",
                147: "RAZINGS_BY_P4",
                148: "RAZINGS_BY_P5",
                149: "RAZINGS_BY_P6",
                150: "RAZINGS_BY_P7",
                151: "RAZINGS_BY_P8",
                152: "LOST_UNITS_SCORE",
                153: "LOST_BUILDINGS_SCORE",
                154: "LOST_UNITS",
                155: "LOST_BUILDINGS",
                156: "TRIBUTE_FROM_P1",
                157: "TRIBUTE_FROM_P2",
                158: "TRIBUTE_FROM_P3",
                159: "TRIBUTE_FROM_P4",
                160: "TRIBUTE_FROM_P5",
                161: "TRIBUTE_FROM_P6",
                162: "TRIBUTE_FROM_P7",
                163: "TRIBUTE_FROM_P8",
                164: "SCORE_UNITS_CURRENT",
                165: "SCORE_BUILDINGS_CURRENT", #default: 275
                166: "COLLECTED_FOOD",
                167: "COLLECTED_WOOD",
                168: "COLLECTED_STONE",
                169: "COLLECTED_GOLD",
                170: "SCORE_MILITARY",
                171: "TRIBUTE_RECEIVED",
                172: "SCORE_RAZINGS",
                173: "TOTAL_CASTLES",
                174: "TOTAL_WONDERS",
                175: "SCORE_ECONOMY_TRIBUTES",
                176: "CONVERT_ADJUSTMENT_MIN", #used for resistance against monk conversions
                177: "CONVERT_ADJUSTMENT_MAX",
                178: "CONVERT_RESIST_ADJUSTMENT_MIN",
                179: "CONVERT_RESIST_ADJUSTMENT_MAX",
                180: "CONVERT_BUILDIN_MIN", #default: 15
                181: "CONVERT_BUILDIN_MAX", #default: 25
                182: "CONVERT_BUILDIN_CHANCE", #default: 25
                183: "REVEAL_ENEMY",
                184: "SCORE_SOCIETY",
                185: "SCORE_FOOD",
                186: "SCORE_WOOD",
                187: "SCORE_STONE",
                188: "SCORE_GOLD",
                189: "CHOPPING_PRODUCTIVITY", #default: 1
                190: "FOOD_GATHERING_PRODUCTIVITY", #default: 1
                191: "RELIC_GOLD_PRODUCTION_RATE", #default: 30
                192: "HERESY_ACTIVE", #bool
                193: "THEOCRACY_ACTIVE", #bool
                194: "CRENELLATIONS_ACTIVE", #bool
                195: "CONSTRUCTION_RATE", #except for wonders
                196: "WONDER_BONUS",
                197: "SPIES_DISCOUNT", #or atheism_active?
            }
        )),
        (dataformat.READ, "amount", "int16_t"),
        (dataformat.READ, "enabled", "int16_t"),
    )
class BuildingAnnex(dataformat.Exportable):
    """One annex building slot: which unit it spawns and its placement
    offsets relative to the head building."""

    name_struct = "building_annex"
    name_struct_file = "unit"
    struct_description = "a possible building annex."

    data_format = (
        (dataformat.READ_EXPORT, "unit_id", "int16_t"),
        (dataformat.READ_EXPORT, "misplaced0", "float"),
        (dataformat.READ_EXPORT, "misplaced1", "float"),
    )

    def __init__(self, **args):
        """Forward all keyword arguments to the Exportable base class."""
        super().__init__(**args)
class UnitObject(dataformat.Exportable):
    """
    base properties for every unit entry.

    The data_format tuple mirrors the on-disk layout of one unit record;
    entry order is the exact read order and must not change.
    """

    name_struct = "unit_object"
    name_struct_file = "unit"
    struct_description = "base properties for all units."

    data_format = (
        # identity and localization
        (dataformat.READ, "name_length", "uint16_t"),
        (dataformat.READ_EXPORT, "id0", "int16_t"),
        (dataformat.READ_EXPORT, "language_dll_name", "uint16_t"),
        (dataformat.READ_EXPORT, "language_dll_creation", "uint16_t"),
        (dataformat.READ_EXPORT, "unit_class", dataformat.EnumLookupMember(
            raw_type = "int16_t",
            type_name = "unit_classes",
            lookup_dict = {
                0: "ARCHER",
                1: "ARTIFACT",
                2: "TRADE_BOAT",
                3: "BUILDING",
                4: "CIVILIAN",
                5: "SEA_FISH",
                6: "SOLDIER",
                7: "BERRY_BUSH",
                8: "STONE_MINE",
                9: "PREY_ANIMAL",
                10: "PREDATOR_ANIMAL",
                11: "OTHER",
                12: "CAVALRY",
                13: "SIEGE_WEAPON",
                14: "TERRAIN",
                15: "TREES",
                18: "PRIEST",
                19: "TRADE_CART",
                20: "TRANSPORT_BOAT",
                21: "FISHING_BOAT",
                22: "WAR_BOAT",
                23: "CONQUISTADOR",
                27: "WALLS",
                28: "PHALANX",
                29: "ANIMAL_DOMESTICATED",
                30: "FLAGS",
                32: "GOLD_MINE",
                33: "SHORE_FISH",
                34: "CLIFF",
                35: "PETARD",
                36: "CAVALRY_ARCHER",
                37: "DOLPHIN",
                38: "BIRDS",
                39: "GATES",
                40: "PILES",
                41: "PILES_OF_RESOURCE",
                42: "RELIC",
                43: "MONK_WITH_RELIC",
                44: "HAND_CANNONEER",
                45: "TWO_HANDED_SWORD",
                46: "PIKEMAN",
                47: "SCOUT_CAVALRY",
                48: "ORE_MINE",
                49: "FARM",
                50: "SPEARMAN",
                51: "PACKED_SIEGE_UNITS",
                52: "TOWER",
                53: "BOARDING_BOAT",
                54: "UNPACKED_SIEGE_UNITS",
                55: "SCORPION",
                56: "RAIDER",
                57: "CAVALRY_RAIDER",
                58: "SHEEP",
                59: "KING",
                61: "HORSE",
            },
        )),
        # graphics and basic stats
        (dataformat.READ_EXPORT, "graphic_standing0", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_standing1", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_dying0", "int16_t"),
        (dataformat.READ_EXPORT, "graphic_dying1", "int16_t"),
        (dataformat.READ, "death_mode", "int8_t"), #1 = become `dead_unit_id` (reviving does not make it usable again)
        (dataformat.READ_EXPORT, "hit_points", "int16_t"), #unit health. -1=insta-die
        (dataformat.READ, "line_of_sight", "float"),
        (dataformat.READ, "garrison_capacity", "int8_t"), #number of units that can garrison in there
        (dataformat.READ_EXPORT, "radius_size0", "float"), #size of the unit
        (dataformat.READ_EXPORT, "radius_size1", "float"),
        (dataformat.READ, "hp_bar_height0", "float"), #vertical hp bar distance from ground
        (dataformat.READ_EXPORT, "sound_creation0", "int16_t"),
        (dataformat.READ_EXPORT, "sound_creation1", "int16_t"),
        (dataformat.READ, "dead_unit_id", "int16_t"), #unit id to become on death
        (dataformat.READ, "placement_mode", "int8_t"), #0=placable on top of others in scenario editor, 5=can't
        (dataformat.READ, "air_mode", "int8_t"), #1=no footprints
        (dataformat.READ, "icon_id", "int16_t"), #frame id of the icon slp (57029) to place on the creation button
        (dataformat.READ, "hidden_in_editor", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "enabled", "int16_t"), #0=unlocked by research, 1=insta-available
        # placement constraints
        (dataformat.READ, "placement_bypass_terrain0", "int16_t"), #terrain id that's needed somewhere on the foundation (e.g. dock water)
        (dataformat.READ, "placement_bypass_terrain1", "int16_t"), #second slot for ^
        (dataformat.READ, "placement_terrain0", "int16_t"), #terrain needed for placement (e.g. dock: water)
        (dataformat.READ, "placement_terrain1", "int16_t"), #alternative terrain needed for placement (e.g. dock: shallows)
        (dataformat.READ, "editor_radius0", "float"),
        (dataformat.READ, "editor_radius1", "float"),
        (dataformat.READ_EXPORT, "building_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "building_modes",
            lookup_dict = {
                0: "NON_BUILDING", #gates, farms, walls, towers
                2: "TRADE_BUILDING", #towncenter, port, trade workshop
                3: "ANY",
            },
        )),
        (dataformat.READ_EXPORT, "visible_in_fog", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "fog_visibility",
            lookup_dict = {
                0: "INVISIBLE", #people etc
                1: "VISIBLE", #buildings
                3: "ONLY_IN_FOG",
            },
        )),
        (dataformat.READ_EXPORT, "terrain_restriction", dataformat.EnumLookupMember(
            raw_type = "int16_t", #determines on what type of ground the unit can be placed/walk
            type_name = "ground_type", #is actually the id of the terrain_restriction entry!
            lookup_dict = {
                0x00: "ANY",
                0x01: "SHORELINE",
                0x02: "WATER",
                0x03: "WATER_SHIP_0x03",
                0x04: "FOUNDATION",
                0x05: "NOWHERE", #can't place anywhere
                0x06: "WATER_DOCK", #shallow water for dock placement
                0x07: "SOLID",
                0x08: "NO_ICE_0x08",
                0x0A: "NO_ICE_0x0A",
                0x0B: "FOREST",
                0x0C: "UNKNOWN_0x0C",
                0x0D: "WATER_0x0D", #great fish
                0x0E: "UNKNOWN_0x0E",
                0x0F: "WATER_SHIP_0x0F", #transport ship
                0x10: "GRASS_SHORELINE", #for gates and walls
                0x11: "WATER_ANY_0x11",
                0x12: "UNKNOWN_0x12",
                0x13: "FISH_NO_ICE",
                0x14: "WATER_ANY_0x14",
                0x15: "WATER_SHALLOW",
            },
        )),
        # behavior flags
        (dataformat.READ_EXPORT, "fly_mode", "int8_t"),
        (dataformat.READ_EXPORT, "ressource_capacity", "int16_t"),
        (dataformat.READ_EXPORT, "ressource_decay", "float"), #when animals rot, their ressources decay
        (dataformat.READ_EXPORT, "blast_type", dataformat.EnumLookupMember(
            raw_type = "int8_t",
            type_name = "blast_types",
            lookup_dict = {
                0: "UNIT_0", #projectile, dead, fish, relic, tree, gate, towncenter
                1: "OTHER", #'other' things with multiple rotations
                2: "BUILDING", #buildings, gates, walls, towncenter, fishtrap
                3: "UNIT_3", #boar, farm, fishingship, villager, tradecart, sheep, turkey, archers, junk, ships, monk, siege
            }
        )),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "interaction_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t", #what can be done with this unit?
            type_name = "interaction_modes",
            lookup_dict = {
                0: "NOTHING_0",
                1: "NOTHING_1",
                2: "SELECTABLE",
                3: "SELECT_ATTACK",
                4: "SELECT_ATTACK_MOVE",
                5: "SELECT_MOVE",
            },
        )),
        (dataformat.READ_EXPORT, "minimap_mode", dataformat.EnumLookupMember(
            raw_type = "int8_t", #how does the unit show up on the minimap
            type_name = "minimap_modes",
            lookup_dict = {
                0: "NO_DOT_0",
                1: "SQUARE_DOT", #turns white when selected
                2: "DIAMOND_DOT", #dito
                3: "DIAMOND_DOT_KEEPCOLOR",
                4: "LARGEDOT_0", #observable by all players, no attack-blinking
                5: "LARGEDOT_1",
                6: "NO_DOT_6",
                7: "NO_DOT_7",
                8: "NO_DOT_8",
                9: "NO_DOT_9",
                10: "NO_DOT_10",
            },
        )),
        (dataformat.READ_EXPORT, "command_attribute", dataformat.EnumLookupMember(
            raw_type = "int16_t", #selects the available ui command buttons for the unit
            type_name = "command_attributes",
            lookup_dict = {
                0: "LIVING", #commands: delete, garrison, stop, attributes: hit points
                1: "ANIMAL", #animal
                2: "NONMILITARY_BULIDING", #nonmilitary building (build page 1)
                3: "VILLAGER", #villager
                4: "MILITARY_UNIT", #military unit
                5: "TRADING_UNIT", #trading unit
                6: "MONK_EMPTY", #monk
                7: "TRANSPORT_SHIP", #transport ship
                8: "RELIC", #relic / monk with relic
                9: "FISHING_SHIP", #fishing ship
                10: "MILITARY_BUILDING", #military building (build page 2)
                11: "SHIELDED_BUILDING", #shield building (build page 3)
            },
        )),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "language_dll_help", "uint16_t"),
        (dataformat.READ, "hot_keys", "int16_t[4]"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "unselectable", "uint8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        # selection mask rendering behavior:
        #bit 0 == 1 && val != 7: mask shown behind buildings,
        #bit 0 == 0 && val != {6, 10}: no mask displayed,
        #val == {-1, 7}: in open area mask is partially displayed
        #val == {6, 10}: building, causes mask to appear on units behind it
        (dataformat.READ, "selection_mask", "int8_t"),
        (dataformat.READ, "selection_shape_type", "int8_t"),
        (dataformat.READ, "selection_shape", "int8_t"), #0=square, 1<=round
        #bitfield of unit attributes:
        #bit 0: allow garrison,
        #bit 1: don't join formation,
        #bit 2: stealth unit,
        #bit 3: detector unit,
        #bit 4: mechanical unit,
        #bit 5: biological unit,
        #bit 6: self-shielding unit,
        #bit 7: invisible unit
        (dataformat.READ, "attribute", "uint8_t"),
        (dataformat.READ, "civilisation", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_EXPORT, "selection_effect", dataformat.EnumLookupMember(
            raw_type = "int8_t", #things that happen when the unit was selected
            type_name = "selection_effects",
            lookup_dict = {
                0: "NONE",
                1: "HPBAR_ON_OUTLINE_DARK", #permanent, editor only
                2: "HPBAR_ON_OUTLINE_NORMAL",
                3: "HPBAR_OFF_SELECTION_SHADOW",
                4: "HPBAR_OFF_OUTLINE_NORMAL",
                5: "HPBAR_ON_5",
                6: "HPBAR_OFF_6",
                7: "HPBAR_OFF_7",
                8: "HPBAR_ON_8",
                9: "HPBAR_ON_9",
            },
        )),
        (dataformat.READ, "editor_selection_color", "uint8_t"), #0: default, -16: fish trap, farm, 52: deadfarm, OLD-*, 116: flare, whale, dolphin -123: fish
        (dataformat.READ, "selection_radius0", "float"),
        (dataformat.READ, "selection_radius1", "float"),
        (dataformat.READ, "hp_bar_height1", "float"), #vertical hp bar distance from ground
        # nested subdata records
        (dataformat.READ_EXPORT, "ressource_storage", dataformat.SubdataMember(
            ref_type=RessourceStorage,
            length=3,
        )),
        (dataformat.READ, "damage_graphic_count", "int8_t"),
        (dataformat.READ_EXPORT, "damage_graphic", dataformat.SubdataMember(
            ref_type=DamageGraphic,
            length="damage_graphic_count",
        )),
        (dataformat.READ_EXPORT, "sound_selection", "int16_t"),
        (dataformat.READ_EXPORT, "sound_dying", "int16_t"),
        (dataformat.READ_EXPORT, "attack_mode", "int16_t"), #0: no attack, 1: attack by following, 2: run when attacked, 3:?, 4: attack
        (dataformat.READ_EXPORT, "name", "char[name_length]"),
        (dataformat.READ_EXPORT, "id1", "int16_t"),
        (dataformat.READ_EXPORT, "id2", "int16_t"),
    )

    def __init__(self, **args):
        # Forward all keyword arguments to the Exportable base class.
        super().__init__(**args)
class UnitFlag(UnitObject):
    """
    type_id >= 20

    Extends the base unit with a movement speed attribute.
    """

    name_struct = "unit_flag"
    name_struct_file = "unit"
    struct_description = "adds speed property to units."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitObject)),
        (dataformat.READ_EXPORT, "speed", "float"),
    )

    def __init__(self, **args):
        """Forward all keyword arguments to the base unit class."""
        super().__init__(**args)
class UnitDoppelganger(UnitFlag):
    """
    type_id >= 25

    Structurally identical to a flag unit; exists only as a separate
    type id in the data file.
    """

    name_struct = "unit_doppelganger"
    name_struct_file = "unit"
    struct_description = "weird doppelganger unit thats actually the same as a flag unit."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitFlag)),
    )

    def __init__(self, **args):
        # Bug fix: the previous zero-argument signature silently dropped the
        # **args that every sibling unit class accepts and forwards, which
        # breaks keyword-based construction (e.g. via unit_type_class_lookup).
        # Calling with no arguments still works, so this is backward-compatible.
        super().__init__(**args)
class UnitDeadOrFish(UnitDoppelganger):
    """
    type_id >= 30

    Extends the doppelganger unit with walking graphics, rotation speed
    and ground-track ("tracking unit") properties.
    """

    name_struct = "unit_dead_or_fish"
    name_struct_file = "unit"
    struct_description = "adds walking graphics, rotations and tracking properties to units."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitDoppelganger)),
        (dataformat.READ_EXPORT, "walking_graphics0", "int16_t"),
        (dataformat.READ_EXPORT, "walking_graphics1", "int16_t"),
        (dataformat.READ, "rotation_speed", "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "tracking_unit_id", "int16_t"),  #unit id what for the ground traces are for
        (dataformat.READ, "tracking_unit_used", "uint8_t"),  #-1: no tracking present, 2: projectiles with tracking unit
        (dataformat.READ, "tracking_unit_density", "float"),  #0: no tracking, 0.5: trade cart, 0.12: some projectiles, 0.4: other projectiles
        (dataformat.READ_UNKNOWN, None, "float"),
        (dataformat.READ_UNKNOWN, None, "int8_t[17]"),
    )

    def __init__(self, **args):
        """Forward all keyword arguments to the base unit class."""
        super().__init__(**args)
class UnitBird(UnitDeadOrFish):
    """
    type_id >= 40

    Extends the dead/fish unit with search radius, work rate, drop sites
    and movement sounds.
    """

    name_struct = "unit_bird"
    name_struct_file = "unit"
    struct_description = "adds search radius and work properties, as well as movement sounds."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitDeadOrFish)),
        (dataformat.READ, "sheep_conversion", "int16_t"),  #0=can be converted by unit command 107 (you found sheep!!1)
        (dataformat.READ, "search_radius", "float"),
        (dataformat.READ, "work_rate", "float"),
        (dataformat.READ, "drop_site0", "int16_t"),  #unit id where gathered ressources shall be delivered to
        (dataformat.READ, "drop_site1", "int16_t"),  #alternative unit id
        (dataformat.READ_EXPORT, "villager_mode", "int8_t"),  #unit can switch villager type (holza? gathara!) 1=male, 2=female
        (dataformat.READ_EXPORT, "move_sound", "int16_t"),
        (dataformat.READ_EXPORT, "stop_sound", "int16_t"),
        (dataformat.READ, "animal_mode", "int8_t"),
    )

    def __init__(self, **args):
        """Forward all keyword arguments to the base unit class."""
        super().__init__(**args)
class UnitMovable(UnitBird):
    """
    type_id >= 60

    Extends the bird unit with attack, armor, range and projectile
    properties.
    """

    name_struct = "unit_movable"
    name_struct_file = "unit"
    struct_description = "adds attack and armor properties to units."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitBird)),
        (dataformat.READ, "default_armor", "int16_t"),
        (dataformat.READ, "attack_count", "uint16_t"),
        (dataformat.READ, "attacks", dataformat.SubdataMember(ref_type=HitType, length="attack_count")),
        (dataformat.READ, "armor_count", "uint16_t"),
        (dataformat.READ, "armors", dataformat.SubdataMember(ref_type=HitType, length="armor_count")),
        (dataformat.READ_EXPORT, "interaction_type", dataformat.EnumLookupMember(
            raw_type="int16_t",
            type_name="interaction_types",
            lookup_dict={
                -1: "UNIT",
                4: "BUILDING",
                6: "DOCK",
                10: "WALL",
            },
        )),
        (dataformat.READ, "max_range", "float"),
        (dataformat.READ, "blast_radius", "float"),
        (dataformat.READ, "reload_time0", "float"),
        (dataformat.READ, "projectile_unit_id", "int16_t"),
        (dataformat.READ, "accuracy_percent", "int16_t"),  #probablity of attack hit
        (dataformat.READ, "tower_mode", "int8_t"),
        (dataformat.READ, "delay", "int16_t"),  #delay in frames before projectile is shot
        (dataformat.READ, "projectile_graphics_displacement_lr", "float"),
        (dataformat.READ, "projectile_graphics_displacement_distance", "float"),
        (dataformat.READ, "projectile_graphics_displacement_height", "float"),
        (dataformat.READ_EXPORT, "blast_level", dataformat.EnumLookupMember(
            raw_type="int8_t",
            type_name="range_damage_type",
            lookup_dict={
                0: "RESSOURCES",
                1: "TREES",
                2: "NEARBY_UNITS",
                3: "TARGET_ONLY",
            },
        )),
        (dataformat.READ, "min_range", "float"),
        (dataformat.READ, "garrison_recovery_rate", "float"),
        (dataformat.READ_EXPORT, "attack_graphic", "int16_t"),
        (dataformat.READ, "melee_armor_displayed", "int16_t"),
        (dataformat.READ, "attack_displayed", "int16_t"),
        (dataformat.READ, "range_displayed", "float"),
        (dataformat.READ, "reload_time1", "float"),
    )

    def __init__(self, **args):
        # Bug fix: the previous zero-argument signature silently dropped the
        # **args that sibling unit classes (UnitFlag, UnitBird, ...) accept
        # and forward, which breaks keyword-based construction through the
        # class lookup table. No-argument calls continue to work.
        super().__init__(**args)
class UnitProjectile(UnitMovable):
    """
    type_id == 60

    Extends the movable unit with projectile flight/impact behavior.
    """

    name_struct = "unit_projectile"
    name_struct_file = "unit"
    struct_description = "adds projectile specific unit properties."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitMovable)),
        (dataformat.READ, "stretch_mode", "int8_t"),  #1 = projectile falls vertically to the bottom of the map
        (dataformat.READ, "compensation_mode", "int8_t"),
        (dataformat.READ, "drop_animation_mode", "int8_t"),  #1 = disappear on hit
        (dataformat.READ, "penetration_mode", "int8_t"),  #1 = pass through hit object
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "projectile_arc", "float"),
    )

    def __init__(self, **args):
        # Bug fix: accept and forward **args like the other unit classes;
        # the former zero-argument signature broke keyword-based construction.
        super().__init__(**args)
class UnitLiving(UnitMovable):
    """
    type_id >= 70

    Extends the movable unit with resource cost, creation location/button
    and garrison projectile-duplication properties.
    """

    name_struct = "unit_living"
    name_struct_file = "unit"
    struct_description = "adds creation location and garrison unit properties."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitMovable)),
        (dataformat.READ, "ressource_cost", dataformat.SubdataMember(ref_type=RessourceCost, length=3)),
        (dataformat.READ, "creation_time", "int16_t"),  #in seconds
        (dataformat.READ, "creation_location_id", "int16_t"),  #e.g. 118 = villager
        #where to place the button with the given icon
        #creation page:
        #+------------------------+
        #| 01 | 02 | 03 | 04 | 05 |
        #|----|----|----|----|----|
        #| 06 | 07 | 08 | 09 | 10 |
        #|----|----|----|----|----|
        #| 11 | 12 | 13 | 14 | 15 |
        #+------------------------+
        #
        #additional page (dock):
        #+------------------------+
        #| 21 | 22 | 23 | 24 | 25 |
        #|----|----|----|----|----|
        #| 26 | 27 | 28 | 29 | 30 |
        #|----|----|----|----|----|
        #| 31 | 32 | 33 | 34 | 35 |
        #+------------------------+
        (dataformat.READ, "creation_button_id", "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ, "missile_graphic_delay", "int8_t"),  #delay before the projectile is fired.
        (dataformat.READ, "hero_mode", "int8_t"),  #if building: "others" tab in editor, if living unit: "heroes" tab, regenerate health + monk immunity
        (dataformat.READ, "garrison_graphic", "int32_t"),  #graphic to display when units are garrisoned
        (dataformat.READ, "attack_missile_duplication_min", "float"),  #projectile duplication when nothing garrisoned
        (dataformat.READ, "attack_missile_duplication_max", "int8_t"),  #duplication when fully garrisoned
        (dataformat.READ, "attack_missile_duplication_spawning_width", "float"),
        (dataformat.READ, "attack_missile_duplication_spawning_length", "float"),
        (dataformat.READ, "attack_missile_duplication_spawning_randomness", "float"),  #placement randomness, 0=from single spot, 1=random, 1<less random
        (dataformat.READ, "attack_missile_duplication_unit_id", "int32_t"),
        (dataformat.READ, "attack_missile_duplication_graphic_id", "int32_t"),
        (dataformat.READ, "dynamic_image_update", "int8_t"),  #determines adjacent unit graphics, if 1: building can adapt graphics by adjacent buildings
        (dataformat.READ, "pierce_armor_displayed", "int16_t"),  #unit stats display of pierce armor
    )

    def __init__(self, **args):
        # Bug fix: accept and forward **args like the other unit classes;
        # the former zero-argument signature broke keyword-based construction.
        super().__init__(**args)
class UnitBuilding(UnitLiving):
    """
    type_id >= 80

    Extends the living unit with construction graphics, annex buildings
    and garrison behavior.
    """

    name_struct = "unit_building"
    name_struct_file = "unit"
    struct_description = "construction graphics and garrison building properties for units."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitLiving)),
        (dataformat.READ_EXPORT, "construction_graphic_id", "int16_t"),
        (dataformat.READ, "snow_graphic_id", "int16_t"),
        (dataformat.READ, "adjacent_mode", "int16_t"),  #1=adjacent units may change the graphics
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ, "stack_unit_id", "int16_t"),  #second building to place directly on top
        (dataformat.READ_EXPORT, "terrain_id", "int16_t"),  #change underlying terrain to this id when building completed
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "research_id", "int16_t"),  #research_id to be enabled when building creation
        (dataformat.READ_UNKNOWN, None, "int8_t"),
        (dataformat.READ_EXPORT, "building_annex", dataformat.SubdataMember(ref_type=BuildingAnnex, length=4)),
        (dataformat.READ, "head_unit_id", "int16_t"),  #building at which an annex building is attached to
        (dataformat.READ, "transform_unit_id", "int16_t"),  #destination unit id when unit shall transform (e.g. unpack)
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ, "construction_sound_id", "int16_t"),
        (dataformat.READ_EXPORT, "garrison_type", dataformat.EnumLookupMember(
            raw_type="int8_t",
            type_name="garrison_types",
            lookup_dict={  #TODO: create bitfield
                0x00: "NONE",
                0x01: "VILLAGER",
                0x02: "INFANTRY",
                0x04: "CAVALRY",
                0x08: "MONK",
                0x0b: "NOCAVALRY",
                0x0f: "ALL",
            },
        )),
        (dataformat.READ, "garrison_heal_rate", "float"),
        (dataformat.READ_UNKNOWN, None, "int32_t"),
        (dataformat.READ_UNKNOWN, None, "int16_t"),
        (dataformat.READ_UNKNOWN, None, "int8_t[6]"),
    )

    def __init__(self, **args):
        # Bug fix: accept and forward **args like the other unit classes;
        # the former zero-argument signature broke keyword-based construction.
        super().__init__(**args)
class UnitTree(UnitObject):
    """
    type_id = 90

    A tree: carries no properties beyond the base unit.
    """

    name_struct = "unit_tree"
    name_struct_file = "unit"
    struct_description = "just a tree unit."

    data_format = (
        (dataformat.READ_EXPORT, None, dataformat.IncludeMembers(cls=UnitObject)),
    )

    def __init__(self, **args):
        """Forward all keyword arguments to the base unit class."""
        super().__init__(**args)
# Maps the numeric unit type id stored in the data file to its canonical
# kind name.
unit_type_lookup = {
    10: "object",
    20: "flag",
    25: "doppelganger",
    30: "dead_or_fish",
    40: "bird",
    60: "projectile",
    70: "living",
    80: "building",
    90: "tree",
}

# Maps each kind name to the Exportable subclass that parses records of
# that kind; keep in sync with unit_type_lookup above.
unit_type_class_lookup = {
    "object": UnitObject,
    "flag": UnitFlag,
    "doppelganger": UnitDoppelganger,
    "dead_or_fish": UnitDeadOrFish,
    "bird": UnitBird,
    "projectile": UnitProjectile,
    "living": UnitLiving,
    "building": UnitBuilding,
    "tree": UnitTree,
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/bash
# Copyright 2019-2024 Tauri Programme within The Commons Conservancy
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT

# Fail (exit 1) when the working tree has uncommitted changes, printing the
# list of changed files; succeed silently-cleanly otherwise.
changed_files=$(git diff --ignore-submodules --name-only HEAD)

if [ -n "$changed_files" ]; then
  echo "✘ found diff:"
  echo "$changed_files"
  exit 1
fi

echo "✔ working directory is clean"
|
unknown
|
github
|
https://github.com/tauri-apps/tauri
|
.scripts/ci/has-diff.sh
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
libcontainercgroups "github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
libcontainercgroupmanager "github.com/opencontainers/cgroups/manager"
cgroupsystemd "github.com/opencontainers/cgroups/systemd"
"k8s.io/klog/v2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/metrics"
)
const (
	// systemdSuffix is the cgroup name suffix for systemd
	systemdSuffix string = ".slice"
	// Cgroup2MemoryMin is memory.min for cgroup v2
	Cgroup2MemoryMin string = "memory.min"
	// Cgroup2MemoryHigh is memory.high for cgroup v2
	Cgroup2MemoryHigh string = "memory.high"
	// Cgroup2MaxCpuLimit is the literal value written to remove a cgroup v2 CPU quota.
	Cgroup2MaxCpuLimit string = "max"
	// Cgroup2MaxSwapFilename is the cgroup v2 interface file for the swap limit.
	Cgroup2MaxSwapFilename string = "memory.swap.max"
)

// RootCgroupName is the empty internal name denoting the cgroup hierarchy root.
var RootCgroupName = CgroupName([]string{})
// NewCgroupName composes a new cgroup name.
// Use RootCgroupName as base to start at the root.
// This function does some basic check for invalid characters at the name.
func NewCgroupName(base CgroupName, components ...string) CgroupName {
	for _, component := range components {
		// Forbid using "_" in internal names. When remapping internal
		// names to systemd cgroup driver, we want to remap "-" => "_",
		// so we forbid "_" so that we can always reverse the mapping.
		if strings.Contains(component, "/") || strings.Contains(component, "_") {
			panic(fmt.Errorf("invalid character in component [%q] of CgroupName", component))
		}
	}
	// Copy base into a fresh slice so the caller's slice is never aliased or mutated.
	return CgroupName(append(append([]string{}, base...), components...))
}
// escapeSystemdCgroupName rewrites every "-" in an internal cgroup name
// component to "_" so it can be embedded in a systemd slice name. The mapping
// is reversible because "_" is forbidden in internal names (see NewCgroupName).
func escapeSystemdCgroupName(part string) string {
	// strings.ReplaceAll is the idiomatic equivalent of Replace(..., -1).
	return strings.ReplaceAll(part, "-", "_")
}
// unescapeSystemdCgroupName is the inverse of escapeSystemdCgroupName: it
// rewrites every "_" back to "-" when converting a systemd slice name part
// into the internal representation.
func unescapeSystemdCgroupName(part string) string {
	// strings.ReplaceAll is the idiomatic equivalent of Replace(..., -1).
	return strings.ReplaceAll(part, "_", "-")
}
// cgroupName.ToSystemd converts the internal cgroup name to a systemd name.
// For example, the name {"kubepods", "burstable", "pod1234-abcd-5678-efgh"} becomes
// "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1234_abcd_5678_efgh.slice"
// This function always expands the systemd name into the cgroupfs form. If only
// the last part is needed, use path.Base(...) on it to discard the rest.
func (cgroupName CgroupName) ToSystemd() string {
	if len(cgroupName) == 0 || (len(cgroupName) == 1 && cgroupName[0] == "") {
		return "/"
	}
	// Escape each component, then join with "-" and append the slice suffix.
	escaped := make([]string, 0, len(cgroupName))
	for _, component := range cgroupName {
		escaped = append(escaped, escapeSystemdCgroupName(component))
	}
	result, err := cgroupsystemd.ExpandSlice(strings.Join(escaped, "-") + systemdSuffix)
	if err != nil {
		// Should never happen...
		panic(fmt.Errorf("error converting cgroup name [%v] to systemd format: %v", cgroupName, err))
	}
	return result
}
// ParseSystemdToCgroupName converts a systemd-style cgroup path back into the
// internal CgroupName form by unescaping each dash-separated part of the
// final ".slice" component.
func ParseSystemdToCgroupName(name string) CgroupName {
	lastComponent := strings.TrimSuffix(path.Base(name), systemdSuffix)
	rawParts := strings.Split(lastComponent, "-")
	components := make([]string, 0, len(rawParts))
	for _, rawPart := range rawParts {
		components = append(components, unescapeSystemdCgroupName(rawPart))
	}
	return CgroupName(components)
}
// ToCgroupfs renders the internal cgroup name as an absolute cgroupfs path,
// e.g. {"kubepods", "burstable"} -> "/kubepods/burstable"; the root name maps to "/".
func (cgroupName CgroupName) ToCgroupfs() string {
	return "/" + path.Join(cgroupName...)
}
// ParseCgroupfsToCgroupName converts a literal cgroupfs path into the internal
// CgroupName form. The root path ("/" or "") yields an empty, non-nil name.
func ParseCgroupfsToCgroupName(name string) CgroupName {
	trimmed := strings.TrimPrefix(name, "/")
	parts := strings.Split(trimmed, "/")
	if len(parts) == 1 && parts[0] == "" {
		// Keep an empty (but non-nil) slice for the root, matching RootCgroupName.
		parts = []string{}
	}
	return CgroupName(parts)
}
// IsSystemdStyleName reports whether name looks like a systemd cgroup name,
// i.e. whether it ends with the ".slice" suffix.
func IsSystemdStyleName(name string) bool {
	return strings.HasSuffix(name, systemdSuffix)
}
// CgroupSubsystems holds information about the mounted cgroup subsystems
type CgroupSubsystems struct {
	// Cgroup subsystem mounts.
	// e.g.: "/sys/fs/cgroup/cpu" -> ["cpu", "cpuacct"]
	Mounts []libcontainercgroups.Mount
	// Cgroup subsystem to their mount location.
	// e.g.: "cpu" -> "/sys/fs/cgroup/cpu"
	// NOTE(review): MountPoints appears to be derived from Mounts — confirm at the population site.
	MountPoints map[string]string
}
// cgroupCommon implements common tasks
// that are valid for both cgroup v1 and v2.
// This prevents duplicating the code between
// v1 and v2 specific implementations.
type cgroupCommon struct {
	// subsystems holds information about all the
	// mounted cgroup subsystems on the node
	subsystems *CgroupSubsystems

	// useSystemd tells if systemd cgroup manager should be used.
	useSystemd bool
}

// Make sure that cgroupV1impl and cgroupV2impl implement the CgroupManager interface
var _ CgroupManager = &cgroupV1impl{}
var _ CgroupManager = &cgroupV2impl{}
// NewCgroupManager is a factory method that returns a CgroupManager
// It selects the v2 implementation when the host runs in cgroup v2
// unified mode, and the v1 implementation otherwise.
func NewCgroupManager(logger klog.Logger, cs *CgroupSubsystems, cgroupDriver string) CgroupManager {
	if libcontainercgroups.IsCgroup2UnifiedMode() {
		return NewCgroupV2Manager(logger, cs, cgroupDriver)
	}
	return NewCgroupV1Manager(logger, cs, cgroupDriver)
}
// newCgroupCommon builds the state shared by the cgroup v1 and v2 managers.
// NOTE(review): logger is currently unused here — presumably kept for signature
// symmetry with the v1/v2 constructors; confirm before removing.
func newCgroupCommon(logger klog.Logger, cs *CgroupSubsystems, cgroupDriver string) cgroupCommon {
	systemdDriver := cgroupDriver == "systemd"
	return cgroupCommon{
		subsystems: cs,
		useSystemd: systemdDriver,
	}
}
// Name converts the cgroup to the driver specific value in cgroupfs form.
// This always returns a valid cgroupfs path even when systemd driver is in use!
func (m *cgroupCommon) Name(name CgroupName) string {
	if !m.useSystemd {
		return name.ToCgroupfs()
	}
	return name.ToSystemd()
}
// CgroupName converts the literal cgroupfs name on the host to an internal identifier.
func (m *cgroupCommon) CgroupName(name string) CgroupName {
	if !m.useSystemd {
		return ParseCgroupfsToCgroupName(name)
	}
	return ParseSystemdToCgroupName(name)
}
// buildCgroupPaths builds a path to each cgroup subsystem for the specified name.
func (m *cgroupCommon) buildCgroupPaths(name CgroupName) map[string]string {
	adapted := m.Name(name)
	paths := make(map[string]string, len(m.subsystems.MountPoints))
	for subsystem, mountPoint := range m.subsystems.MountPoints {
		paths[subsystem] = path.Join(mountPoint, adapted)
	}
	return paths
}
// libctCgroupConfig converts CgroupConfig to libcontainer's Cgroup config.
// When needResources is false the returned config carries an empty Resources,
// which suffices for operations (like Destroy) that only need the cgroup's
// identity rather than its limits.
func (m *cgroupCommon) libctCgroupConfig(logger klog.Logger, in *CgroupConfig, needResources bool) *libcontainercgroups.Cgroup {
	config := &libcontainercgroups.Cgroup{
		Systemd: m.useSystemd,
	}
	if needResources {
		config.Resources = m.toResources(logger, in.ResourceParameters)
	} else {
		config.Resources = &libcontainercgroups.Resources{}
	}

	if !config.Systemd {
		// For fs cgroup manager, we can either set Path or Name and Parent.
		// Setting Path is easier.
		config.Path = in.Name.ToCgroupfs()
		return config
	}

	// For systemd, we have to set Name and Parent, as they are needed to talk to systemd.
	// Setting Path is optional as it can be deduced from Name and Parent.

	// TODO(filbranden): This logic belongs in libcontainer/cgroup/systemd instead.
	// It should take a libcontainerconfigs.Cgroup.Path field (rather than Name and Parent)
	// and split it appropriately, using essentially the logic below.
	// This was done for cgroupfs in opencontainers/runc#497 but a counterpart
	// for systemd was never introduced.

	// Split the systemd path into its last segment (Name) and its immediate
	// parent segment (Parent); the root directory maps to systemd's "-.slice".
	dir, base := path.Split(in.Name.ToSystemd())
	if dir == "/" {
		dir = "-.slice"
	} else {
		dir = path.Base(dir)
	}
	config.Parent = dir
	config.Name = base
	return config
}
// Destroy destroys the specified cgroup, recording the operation's duration
// in the cgroup-manager metrics.
func (m *cgroupCommon) Destroy(logger klog.Logger, cgroupConfig *CgroupConfig) error {
	start := time.Now()
	defer func() {
		metrics.CgroupManagerDuration.WithLabelValues("destroy").Observe(metrics.SinceInSeconds(start))
	}()

	libctConfig := m.libctCgroupConfig(logger, cgroupConfig, false)
	libctManager, err := libcontainercgroupmanager.New(libctConfig)
	if err != nil {
		return err
	}

	// Delegate the actual removal to libcontainer's manager.
	if destroyErr := libctManager.Destroy(); destroyErr != nil {
		return fmt.Errorf("unable to destroy cgroup paths for cgroup %v : %v", cgroupConfig.Name, destroyErr)
	}

	return nil
}
// SetCgroupConfig applies resourceConfig to the named cgroup by delegating
// to Update with a synthesized CgroupConfig.
func (m *cgroupCommon) SetCgroupConfig(logger klog.Logger, name CgroupName, resourceConfig *ResourceConfig) error {
	cfg := &CgroupConfig{
		Name:               name,
		ResourceParameters: resourceConfig,
	}
	return m.Update(logger, cfg)
}
// getCPUWeight converts from the range [2, 262144] to [1, 10000]
// (cgroup v1 cpu.shares to cgroup v2 cpu.weight). A nil input yields 0.
func getCPUWeight(cpuShares *uint64) uint64 {
	switch {
	case cpuShares == nil:
		return 0
	case *cpuShares >= 262144:
		return 10000
	default:
		// Linear mapping of [2, 262144] onto [1, 10000].
		return 1 + ((*cpuShares-2)*9999)/262142
	}
}
var (
	// availableRootControllersOnce guards the one-time discovery of
	// availableRootControllers.
	availableRootControllersOnce sync.Once
	// availableRootControllers caches the set of controllers available on
	// the node, populated lazily.
	availableRootControllers sets.Set[string]
)
// toResources translates a kubelet ResourceConfig into libcontainer's
// Resources structure, accounting for the cgroup version (shares on v1,
// weight on v2). A nil resourceConfig yields an empty Resources with device
// and freezer handling skipped.
func (m *cgroupCommon) toResources(logger klog.Logger, resourceConfig *ResourceConfig) *libcontainercgroups.Resources {
	resources := &libcontainercgroups.Resources{
		SkipDevices:     true,
		SkipFreezeOnSet: true,
	}
	if resourceConfig == nil {
		return resources
	}
	if resourceConfig.Memory != nil {
		resources.Memory = *resourceConfig.Memory
	}
	if resourceConfig.CPUShares != nil {
		if libcontainercgroups.IsCgroup2UnifiedMode() {
			// cgroup v2 replaces cpu.shares with cpu.weight on a
			// different scale; convert accordingly.
			resources.CpuWeight = getCPUWeight(resourceConfig.CPUShares)
		} else {
			resources.CpuShares = *resourceConfig.CPUShares
		}
	}
	if resourceConfig.CPUQuota != nil {
		resources.CpuQuota = *resourceConfig.CPUQuota
	}
	if resourceConfig.CPUPeriod != nil {
		resources.CpuPeriod = *resourceConfig.CPUPeriod
	}
	if resourceConfig.PidsLimit != nil {
		resources.PidsLimit = resourceConfig.PidsLimit
	}
	if !resourceConfig.CPUSet.IsEmpty() {
		resources.CpusetCpus = resourceConfig.CPUSet.String()
	}

	m.maybeSetHugetlb(logger, resourceConfig, resources)

	// Ideally unified is used for all the resources when running on cgroup v2.
	// It doesn't make difference for the memory.max limit, but for e.g. the cpu controller
	// you can specify the correct setting without relying on the conversions performed by the OCI runtime.
	if resourceConfig.Unified != nil && libcontainercgroups.IsCgroup2UnifiedMode() {
		resources.Unified = make(map[string]string)
		for k, v := range resourceConfig.Unified {
			resources.Unified[k] = v
		}
	}
	return resources
}
// maybeSetHugetlb fills resources.HugetlbLimit from resourceConfig when the
// hugetlb controller is available; any supported page size absent from the
// config is explicitly limited to 0.
func (m *cgroupCommon) maybeSetHugetlb(logger klog.Logger, resourceConfig *ResourceConfig, resources *libcontainercgroups.Resources) {
	// Check if hugetlb is supported: on v2 via the unified controllers list,
	// on v1 via the subsystem mount points.
	if libcontainercgroups.IsCgroup2UnifiedMode() {
		if !getSupportedUnifiedControllers().Has("hugetlb") {
			logger.V(6).Info("Optional subsystem not supported: hugetlb")
			return
		}
	} else if _, ok := m.subsystems.MountPoints["hugetlb"]; !ok {
		logger.V(6).Info("Optional subsystem not supported: hugetlb")
		return
	}

	// For each page size enumerated, set that value.
	pageSizes := sets.New[string]()
	for pageSize, limit := range resourceConfig.HugePageLimit {
		sizeString, err := v1helper.HugePageUnitSizeFromByteSize(pageSize)
		if err != nil {
			// Skip page sizes that cannot be rendered as a unit string.
			logger.Info("Invalid pageSize", "err", err)
			continue
		}
		resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
			Pagesize: sizeString,
			Limit:    uint64(limit),
		})
		pageSizes.Insert(sizeString)
	}
	// for each page size omitted, limit to 0
	for _, pageSize := range libcontainercgroups.HugePageSizes() {
		if pageSizes.Has(pageSize) {
			continue
		}
		resources.HugetlbLimit = append(resources.HugetlbLimit, &libcontainercgroups.HugepageLimit{
			Pagesize: pageSize,
			Limit:    uint64(0),
		})
	}
}
// Update updates the cgroup with the specified Cgroup Configuration,
// recording the operation's duration in the cgroup-manager metrics.
func (m *cgroupCommon) Update(logger klog.Logger, cgroupConfig *CgroupConfig) error {
	start := time.Now()
	defer func() {
		metrics.CgroupManagerDuration.WithLabelValues("update").Observe(metrics.SinceInSeconds(start))
	}()

	libctConfig := m.libctCgroupConfig(logger, cgroupConfig, true)
	libctManager, err := libcontainercgroupmanager.New(libctConfig)
	if err != nil {
		return fmt.Errorf("failed to create cgroup manager: %v", err)
	}
	return libctManager.Set(libctConfig.Resources)
}
// Create creates the specified cgroup: directories are made via Apply(-1)
// and the resource limits are then written with Set. A failure to apply
// resource settings is reported via HandleError but does not fail creation.
func (m *cgroupCommon) Create(logger klog.Logger, cgroupConfig *CgroupConfig) error {
	start := time.Now()
	defer func() {
		metrics.CgroupManagerDuration.WithLabelValues("create").Observe(metrics.SinceInSeconds(start))
	}()

	libcontainerCgroupConfig := m.libctCgroupConfig(logger, cgroupConfig, true)
	manager, err := libcontainercgroupmanager.New(libcontainerCgroupConfig)
	if err != nil {
		return err
	}

	// Apply(-1) is a hack to create the cgroup directories for each resource
	// subsystem. The function [cgroups.Manager.apply()] applies cgroup
	// configuration to the process with the specified pid.
	// It creates cgroup files for each subsystems and writes the pid
	// in the tasks file. We use the function to create all the required
	// cgroup files but not attach any "real" pid to the cgroup.
	if err := manager.Apply(-1); err != nil {
		return err
	}

	// it may confuse why we call set after we do apply, but the issue is that runc
	// follows a similar pattern. it's needed to ensure cpu quota is set properly.
	if err := manager.Set(libcontainerCgroupConfig.Resources); err != nil {
		utilruntime.HandleError(fmt.Errorf("cgroup manager.Set failed: %w", err))
	}

	return nil
}
// Pids scans through all subsystems to find pids associated with specified cgroup.
func (m *cgroupCommon) Pids(logger klog.Logger, name CgroupName) []int {
	// we need the driver specific name
	cgroupFsName := m.Name(name)

	// Get a list of processes that we need to kill
	pidsToKill := sets.New[int]()
	// NOTE: pids is deliberately shared with the visitor closure below so
	// each getCgroupProcs result can be folded into pidsToKill.
	var pids []int
	for _, val := range m.subsystems.MountPoints {
		dir := path.Join(val, cgroupFsName)
		_, err := os.Stat(dir)
		if os.IsNotExist(err) {
			// The subsystem pod cgroup is already deleted
			// do nothing, continue
			continue
		}
		// Get a list of pids that are still charged to the pod's cgroup
		pids, err = getCgroupProcs(dir)
		if err != nil {
			// best effort: skip subsystems whose procs cannot be read
			continue
		}
		pidsToKill.Insert(pids...)

		// WalkFunc which is called for each file and directory in the pod cgroup dir
		visitor := func(path string, info os.FileInfo, err error) error {
			if err != nil {
				logger.V(4).Info("Cgroup manager encountered error scanning cgroup path", "path", path, "err", err)
				return filepath.SkipDir
			}
			if !info.IsDir() {
				return nil
			}
			pids, err = getCgroupProcs(path)
			if err != nil {
				logger.V(4).Info("Cgroup manager encountered error getting procs for cgroup path", "path", path, "err", err)
				return filepath.SkipDir
			}
			pidsToKill.Insert(pids...)
			return nil
		}
		// Walk through the pod cgroup directory to check if
		// container cgroups haven't been GCed yet. Get attached processes to
		// all such unwanted containers under the pod cgroup
		if err = filepath.Walk(dir, visitor); err != nil {
			logger.V(4).Info("Cgroup manager encountered error scanning pids for directory", "path", dir, "err", err)
		}
	}
	return sets.List(pidsToKill)
}
// ReduceCPULimits reduces the cgroup's cpu shares to the lowest possible value
func (m *cgroupCommon) ReduceCPULimits(logger klog.Logger, cgroupName CgroupName) error {
	// Set lowest possible CpuShares value for the cgroup
	minShares := uint64(MinShares)
	return m.Update(logger, &CgroupConfig{
		Name:               cgroupName,
		ResourceParameters: &ResourceConfig{CPUShares: &minShares},
	})
}
// readCgroupMemoryConfig reads the given memory limit file under cgroupPath
// and returns it wrapped in a ResourceConfig.
func readCgroupMemoryConfig(cgroupPath string, memLimitFile string) (*ResourceConfig, error) {
	rawLimit, err := fscommon.GetCgroupParamUint(cgroupPath, memLimitFile)
	if err != nil {
		return nil, fmt.Errorf("failed to read %s for cgroup %v: %v", memLimitFile, cgroupPath, err)
	}
	limit := int64(rawLimit)
	//TODO(vinaykul,InPlacePodVerticalScaling): Add memory request support
	return &ResourceConfig{Memory: &limit}, nil
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/kubelet/cm/cgroup_manager_linux.go
|
"""
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import byref, c_double, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.shortcuts import numpy
from django.utils.six.moves import range
class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."
    ptr_type = CS_PTR

    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        self._z = z

    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for idx in range(self.size):
            yield self[idx]

    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)

    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)

    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        point = [self.getX(index), self.getY(index)]
        if self.dims == 3 and self._z:
            point.append(self.getZ(index))
        return tuple(point)

    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # The value must be some kind of sequence: list, tuple, or (when
        # available) a numpy array.
        if not (isinstance(value, (list, tuple)) or
                (numpy and isinstance(value, numpy.ndarray))):
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # How many ordinates does each point of this sequence carry?
        if self.dims == 3 and self._z:
            n_args, set_3d = 3, True
        else:
            n_args, set_3d = 2, False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Store each ordinate of the point.
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d:
            self.setZ(index, value[2])

    # #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        sz = self.size
        if sz < 1 or index < 0 or index >= sz:
            raise IndexError('invalid GEOS Geometry index: %s' % str(index))

    def _checkdim(self, dim):
        "Checks the given dimension."
        if not (0 <= dim <= 2):
            raise GEOSException('invalid ordinate dimension "%d"' % dim)

    # #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))

    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)

    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)

    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)

    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)

    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)

    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)

    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)

    # ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))

    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))

    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D. This property value is
        inherited from the parent Geometry.
        """
        return self._z

    # ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)

    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # 3D coordinates include the Z value; 2D coordinates emit a literal
        # zero in its place.
        substr = '%s,%s,%s ' if self.hasz else '%s,%s,0 '
        coords = ''.join(substr % self[i] for i in range(len(self))).strip()
        return '<coordinates>%s</coordinates>' % coords

    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        n = self.size
        # A single point is returned directly rather than nested in a tuple.
        if n == 1:
            return self[0]
        return tuple(self[i] for i in range(n))
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2021 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wal
import (
"fmt"
"testing"
"github.com/coreos/go-semver/semver"
"github.com/golang/protobuf/proto" //nolint:staticcheck // TODO: remove for a supported version
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/reflect/protoreflect"
"go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/api/v3/membershippb"
"go.etcd.io/etcd/api/v3/version"
"go.etcd.io/etcd/pkg/v3/pbutil"
"go.etcd.io/raft/v3/raftpb"
)
// TestEtcdVersionFromEntry checks that visiting a WAL entry reports the
// minimal etcd version its payload requires, covering normal entries as well
// as both conf-change entry types.
func TestEtcdVersionFromEntry(t *testing.T) {
	raftReq := etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}}
	normalRequestData := pbutil.MustMarshal(&raftReq)

	downgradeVersionTestV3_6Req := etcdserverpb.InternalRaftRequest{DowngradeVersionTest: &etcdserverpb.DowngradeVersionTestRequest{Ver: "3.6.0"}}
	downgradeVersionTestV3_6Data := pbutil.MustMarshal(&downgradeVersionTestV3_6Req)

	downgradeVersionTestV3_7Req := etcdserverpb.InternalRaftRequest{DowngradeVersionTest: &etcdserverpb.DowngradeVersionTestRequest{Ver: "3.7.0"}}
	downgradeVersionTestV3_7Data := pbutil.MustMarshal(&downgradeVersionTestV3_7Req)

	confChange := raftpb.ConfChange{Type: raftpb.ConfChangeAddLearnerNode}
	confChangeData := pbutil.MustMarshal(&confChange)

	confChangeV2 := raftpb.ConfChangeV2{Transition: raftpb.ConfChangeTransitionJointExplicit}
	confChangeV2Data := pbutil.MustMarshal(&confChangeV2)

	tcs := []struct {
		name   string
		input  raftpb.Entry
		expect *semver.Version
	}{
		{
			name: "Using RequestHeader AuthRevision in NormalEntry implies v3.1",
			input: raftpb.Entry{
				Term:  1,
				Index: 2,
				Type:  raftpb.EntryNormal,
				Data:  normalRequestData,
			},
			expect: &version.V3_1,
		},
		{
			name: "Setting downgradeTest version to 3.6 implies version within WAL",
			input: raftpb.Entry{
				Term:  1,
				Index: 2,
				Type:  raftpb.EntryNormal,
				Data:  downgradeVersionTestV3_6Data,
			},
			expect: &version.V3_6,
		},
		{
			name: "Setting downgradeTest version to 3.7 implies version within WAL",
			input: raftpb.Entry{
				Term:  1,
				Index: 2,
				Type:  raftpb.EntryNormal,
				Data:  downgradeVersionTestV3_7Data,
			},
			expect: &version.V3_7,
		},
		{
			name: "Using ConfigChange implies v3.0",
			input: raftpb.Entry{
				Term:  1,
				Index: 2,
				Type:  raftpb.EntryConfChange,
				Data:  confChangeData,
			},
			expect: &version.V3_0,
		},
		{
			name: "Using ConfigChangeV2 implies v3.4",
			input: raftpb.Entry{
				Term:  1,
				Index: 2,
				Type:  raftpb.EntryConfChangeV2,
				Data:  confChangeV2Data,
			},
			expect: &version.V3_4,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			var maxVer *semver.Version
			// Fold every per-field version reported by visitEntry into the max.
			err := visitEntry(tc.input, func(path protoreflect.FullName, ver *semver.Version) error {
				maxVer = maxVersion(maxVer, ver)
				return nil
			})
			require.NoError(t, err)
			assert.Equal(t, tc.expect, maxVer)
		})
	}
}
// TestEtcdVersionFromMessage checks that visiting a protobuf message reports
// the minimal etcd version required by its set fields, enum values, and oneof
// members, taking the maximum when several are set.
func TestEtcdVersionFromMessage(t *testing.T) {
	tcs := []struct {
		name   string
		input  proto.Message
		expect *semver.Version
	}{
		{
			// Fixed typo "impies" -> "implies" in the case name.
			name:   "Empty RequestHeader implies v3.0",
			input:  &etcdserverpb.RequestHeader{},
			expect: &version.V3_0,
		},
		{
			// Case name corrected to match the asserted expectation (V3_1).
			name:   "RequestHeader AuthRevision field set implies v3.1",
			input:  &etcdserverpb.RequestHeader{AuthRevision: 1},
			expect: &version.V3_1,
		},
		{
			name:   "RequestHeader Username set implies v3.0",
			input:  &etcdserverpb.RequestHeader{Username: "Alice"},
			expect: &version.V3_0,
		},
		{
			name:   "When two fields are set take higher version",
			input:  &etcdserverpb.RequestHeader{AuthRevision: 1, Username: "Alice"},
			expect: &version.V3_1,
		},
		{
			name:   "Setting a RequestHeader AuthRevision in subfield implies v3.1",
			input:  &etcdserverpb.InternalRaftRequest{Header: &etcdserverpb.RequestHeader{AuthRevision: 1}},
			expect: &version.V3_1,
		},
		{
			name:   "Setting a DowngradeInfoSetRequest implies v3.5",
			input:  &etcdserverpb.InternalRaftRequest{DowngradeInfoSet: &membershippb.DowngradeInfoSetRequest{}},
			expect: &version.V3_5,
		},
		{
			name:   "Enum CompareResult set to EQUAL implies v3.0",
			input:  &etcdserverpb.Compare{Result: etcdserverpb.Compare_EQUAL},
			expect: &version.V3_0,
		},
		{
			name:   "Enum CompareResult set to NOT_EQUAL implies v3.1",
			input:  &etcdserverpb.Compare{Result: etcdserverpb.Compare_NOT_EQUAL},
			expect: &version.V3_1,
		},
		{
			// Case name corrected to match the asserted expectation (V3_0).
			name:   "Oneof Compare version set implies v3.0",
			input:  &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Version{}},
			expect: &version.V3_0,
		},
		{
			name:   "Oneof Compare lease set implies v3.3",
			input:  &etcdserverpb.Compare{TargetUnion: &etcdserverpb.Compare_Lease{}},
			expect: &version.V3_3,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.name, func(t *testing.T) {
			var maxVer *semver.Version
			// Fold every per-field version reported by visitMessage into the max.
			err := visitMessage(proto.MessageReflect(tc.input), func(path protoreflect.FullName, ver *semver.Version) error {
				maxVer = maxVersion(maxVer, ver)
				return nil
			})
			require.NoError(t, err)
			assert.Equal(t, tc.expect, maxVer)
		})
	}
}
// TestEtcdVersionFromFieldOptionsString checks parsing of textual proto field
// options: only the versionpb.etcd_version_* annotations produce a version;
// unrelated options (numeric tags or other versionpb fields) yield nil.
func TestEtcdVersionFromFieldOptionsString(t *testing.T) {
	tcs := []struct {
		input  string
		expect *semver.Version
	}{
		{
			input: "65001:0",
		},
		{
			input: `65001:0 65004:"NodeID"`,
		},
		{
			input: `[versionpb.XXX]:"3.5"`,
		},
		{
			input:  `[versionpb.etcd_version_msg]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_enum]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_field]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_enum_value]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `65001:0 [versionpb.etcd_version_msg]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `65004:"NodeID" [versionpb.etcd_version_msg]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `65004:"NodeID" [versionpb.etcd_version_enum]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_msg]:"3.5" 65001:0`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_msg]:"3.5" 65004:"NodeID"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.etcd_version_msg]:"3.5" [versionpb.other_field]:"NodeID"`,
			expect: &version.V3_5,
		},
		{
			input:  `[versionpb.other_field]:"NodeID" [versionpb.etcd_version_msg]:"3.5" [versionpb.another_field]:"NodeID"`,
			expect: &version.V3_5,
		},
		{
			input:  `65001:0 [versionpb.etcd_version_msg]:"3.5" 65001:0"`,
			expect: &version.V3_5,
		},
	}
	for _, tc := range tcs {
		t.Run(tc.input, func(t *testing.T) {
			ver, err := etcdVersionFromOptionsString(tc.input)
			require.NoError(t, err)
			// testify's assert.Equal takes (t, expected, actual); the original
			// had the arguments swapped, producing misleading failure output.
			assert.Equal(t, tc.expect, ver)
		})
	}
}
// TestMaxVersion checks maxVersion: nil operands are ignored and the higher
// of two non-nil versions wins, regardless of argument order.
func TestMaxVersion(t *testing.T) {
	tcs := []struct {
		a, b, expect *semver.Version
	}{
		{
			a:      nil,
			b:      nil,
			expect: nil,
		},
		{
			a:      &version.V3_5,
			b:      nil,
			expect: &version.V3_5,
		},
		{
			a:      nil,
			b:      &version.V3_5,
			expect: &version.V3_5,
		},
		{
			a:      &version.V3_6,
			b:      &version.V3_5,
			expect: &version.V3_6,
		},
		{
			a:      &version.V3_5,
			b:      &version.V3_6,
			expect: &version.V3_6,
		},
	}
	for _, tc := range tcs {
		t.Run(fmt.Sprintf("%v %v %v", tc.a, tc.b, tc.expect), func(t *testing.T) {
			got := maxVersion(tc.a, tc.b)
			// testify's assert.Equal takes (t, expected, actual); the original
			// had the arguments swapped, producing misleading failure output.
			assert.Equal(t, tc.expect, got)
		})
	}
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
server/storage/wal/version_test.go
|
"""log model admin."""
from django.contrib import admin
from django.db import models
from django.forms.widgets import TextInput
from apps.managers.challenge_mgr import challenge_mgr
from apps.managers.log_mgr.models import MakahikiLog
from apps.admin.admin import challenge_designer_site, challenge_manager_site, developer_site
class MakahikiLogAdmin(admin.ModelAdmin):
    """Admin configuration for browsing MakahikiLog request records."""
    # Columns shown in the changelist view.
    list_display = ('request_url', "remote_user", 'remote_ip', 'request_time',
                    'request_method', 'response_status')
    # Sidebar filters and free-text search fields.
    list_filter = ('response_status', 'remote_user')
    search_fields = ('request_url', 'remote_ip')
    # Newest requests first, with date drill-down by request time.
    ordering = ["-request_time"]
    date_hierarchy = "request_time"
    # Widen CharField inputs on the edit form for long URLs and user agents.
    formfield_overrides = {
        models.CharField: {'widget': TextInput(attrs={'size': '100'})},
    }

    def has_add_permission(self, request):
        """Disallow creating log entries by hand through the admin."""
        return False
# Register the log model with each admin site that should expose it.
admin.site.register(MakahikiLog, MakahikiLogAdmin)
challenge_designer_site.register(MakahikiLog, MakahikiLogAdmin)
challenge_manager_site.register(MakahikiLog, MakahikiLogAdmin)
developer_site.register(MakahikiLog, MakahikiLogAdmin)
# Surface the log in the "Status" challenge-info groups for admins/developers.
challenge_mgr.register_admin_challenge_info_model("Status", 1, MakahikiLog, 1)
challenge_mgr.register_developer_challenge_info_model("Status", 4, MakahikiLog, 1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pywintypes
import struct
import win32event, win32api
import os
import win32com.directsound.directsound as ds
def wav_header_pack(wfx, datasize):
    """Pack a canonical 44-byte RIFF/WAVE file header.

    wfx is a WAVEFORMATEX-like object whose wFormatTag, nChannels,
    nSamplesPerSec, nAvgBytesPerSec, nBlockAlign, and wBitsPerSample
    attributes are read; datasize is the byte length of the PCM payload
    that will follow the header. Returns the header as bytes.
    """
    # struct's '4s' fields require bytes on Python 3 (passing str raises
    # struct.error), so the fixed ASCII chunk tags are byte literals.
    return struct.pack(
        '<4sl4s4slhhllhh4sl',
        b'RIFF', 36 + datasize,  # RIFF chunk size: file size minus 8-byte tag/size
        b'WAVE', b'fmt ', 16,    # 'fmt ' sub-chunk is 16 bytes for plain PCM
        wfx.wFormatTag, wfx.nChannels, wfx.nSamplesPerSec,
        wfx.nAvgBytesPerSec, wfx.nBlockAlign,
        wfx.wBitsPerSample,
        b'data', datasize)       # 'data' sub-chunk header precedes the samples
# Open the default capture device (no specific device GUID, no outer COM
# aggregation).
d = ds.DirectSoundCaptureCreate(None, None)

# Describe the capture buffer: 16-bit stereo PCM at 44.1 kHz.
# 44100 frames/s * 4 bytes/frame (nBlockAlign) * 2 s = 352800 bytes.
sdesc = ds.DSCBUFFERDESC()
sdesc.dwBufferBytes = 352800 # 2 seconds
sdesc.lpwfxFormat = pywintypes.WAVEFORMATEX()
sdesc.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
sdesc.lpwfxFormat.nChannels = 2
sdesc.lpwfxFormat.nSamplesPerSec = 44100
sdesc.lpwfxFormat.nAvgBytesPerSec = 176400
sdesc.lpwfxFormat.nBlockAlign = 4
sdesc.lpwfxFormat.wBitsPerSample = 16
print(sdesc)
print(d)
buffer = d.CreateCaptureBuffer(sdesc)
# Arrange for `event` to be signaled when capture reaches the end of the
# buffer (DSBPN_OFFSETSTOP).
event = win32event.CreateEvent(None, 0, 0, None)
notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)
notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))
buffer.Start(0)
# Block until the buffer has filled with 2 seconds of audio.
win32event.WaitForSingleObject(event, -1)
data = buffer.Update(0, 352800)
# Write the captured PCM to a .wav file in the temp directory: a 44-byte
# header followed by the raw sample data.
fname=os.path.join(win32api.GetTempPath(), 'test_directsound_record.wav')
f = open(fname, 'wb')
f.write(wav_header_pack(sdesc.lpwfxFormat, 352800))
f.write(data)
f.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
## Input
```javascript
import {identity} from 'shared-runtime';
function Component(statusName) {
// status is local, text is a scope declaration
const {status, text} = foo(statusName);
// color is local, font is a scope declaration
const {color, font} = getStyles(status);
// bg is a declaration
const bg = identity(color);
return (
<div className={bg}>
<span className={font}>{[text]}</span>
</div>
);
}
function foo(name) {
return {
status: `<status>`,
text: `${name}!`,
};
}
function getStyles(status) {
return {
font: 'comic-sans',
color: '#657b83',
};
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: ['Sathya'],
};
```
## Code
```javascript
import { c as _c } from "react/compiler-runtime";
import { identity } from "shared-runtime";
function Component(statusName) {
const $ = _c(12);
let font;
let t0;
let text;
if ($[0] !== statusName) {
const { status, text: t1 } = foo(statusName);
text = t1;
const { color, font: t2 } = getStyles(status);
font = t2;
t0 = identity(color);
$[0] = statusName;
$[1] = font;
$[2] = t0;
$[3] = text;
} else {
font = $[1];
t0 = $[2];
text = $[3];
}
const bg = t0;
let t1;
if ($[4] !== text) {
t1 = [text];
$[4] = text;
$[5] = t1;
} else {
t1 = $[5];
}
let t2;
if ($[6] !== font || $[7] !== t1) {
t2 = <span className={font}>{t1}</span>;
$[6] = font;
$[7] = t1;
$[8] = t2;
} else {
t2 = $[8];
}
let t3;
if ($[9] !== bg || $[10] !== t2) {
t3 = <div className={bg}>{t2}</div>;
$[9] = bg;
$[10] = t2;
$[11] = t3;
} else {
t3 = $[11];
}
return t3;
}
function foo(name) {
const $ = _c(2);
const t0 = `${name}!`;
let t1;
if ($[0] !== t0) {
t1 = { status: "<status>", text: t0 };
$[0] = t0;
$[1] = t1;
} else {
t1 = $[1];
}
return t1;
}
function getStyles(status) {
const $ = _c(1);
let t0;
if ($[0] === Symbol.for("react.memo_cache_sentinel")) {
t0 = { font: "comic-sans", color: "#657b83" };
$[0] = t0;
} else {
t0 = $[0];
}
return t0;
}
export const FIXTURE_ENTRYPOINT = {
fn: Component,
params: ["Sathya"],
};
```
### Eval output
(kind: ok) <div class="#657b83"><span class="comic-sans">Sathya!</span></div>
|
unknown
|
github
|
https://github.com/facebook/react
|
compiler/packages/babel-plugin-react-compiler/src/__tests__/fixtures/compiler/sequential-destructuring-both-mixed-local-and-scope-declaration.expect.md
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys

# Directory containing this script; used to locate the bundled 'templates'
# directory and to build the relative path to the in-tree jinja2 copy.
_current_dir = os.path.dirname(os.path.realpath(__file__))
# jinja2 is in chromium's third_party directory
# Insert at front to override system libraries, and after path[0] == script dir
sys.path.insert(1, os.path.join(_current_dir, *([os.pardir] * 4)))
import jinja2
def apply_template(path_to_template, params, filters=None):
    """Render the given template file with params and return the result.

    Templates are resolved first against the template's own directory, then
    against the shared 'templates' directory next to this script. Optional
    extra jinja filters may be supplied as a dict.
    """
    dirname, basename = os.path.split(path_to_template)
    search_path = [dirname, os.path.join(_current_dir, 'templates')]
    jinja_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(search_path),
        keep_trailing_newline=True,  # newline-terminate generated files
        lstrip_blocks=True,  # so can indent control flow tags
        trim_blocks=True)  # so don't need {%- -%} everywhere
    if filters:
        jinja_env.filters.update(filters)
    return jinja_env.get_template(basename).render(params)
def use_jinja(template_file_name, filters=None):
    """Decorator: render the decorated generator's result through a template.

    The wrapped function's return value is passed as the parameter dict for
    template_file_name; the rendered text is returned in its place.
    """
    import functools

    def real_decorator(generator):
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, ...) and works on both Python 2 and 3; the
        # original assignment of generator.func_name was Python-2-only and
        # raises AttributeError on Python 3.
        @functools.wraps(generator)
        def generator_internal(*args, **kwargs):
            parameters = generator(*args, **kwargs)
            return apply_template(template_file_name, parameters, filters=filters)
        return generator_internal
    return real_decorator
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connectivity_test
import (
"context"
"math/rand"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"go.etcd.io/etcd/tests/v3/framework/integration"
"go.etcd.io/etcd/tests/v3/framework/testutils"
clientv3test "go.etcd.io/etcd/tests/v3/integration/clientv3"
)
var (
	// testTLSInfo holds valid server TLS fixtures with client-certificate
	// authentication enabled.
	testTLSInfo = transport.TLSInfo{
		KeyFile:        testutils.MustAbsPath("../../../fixtures/server.key.insecure"),
		CertFile:       testutils.MustAbsPath("../../../fixtures/server.crt"),
		TrustedCAFile:  testutils.MustAbsPath("../../../fixtures/ca.crt"),
		ClientCertAuth: true,
	}

	// testTLSInfoExpired points at deliberately expired certificate fixtures,
	// used to verify that dialing with bad certs fails.
	testTLSInfoExpired = transport.TLSInfo{
		KeyFile:        testutils.MustAbsPath("../../fixtures-expired/server.key.insecure"),
		CertFile:       testutils.MustAbsPath("../../fixtures-expired/server.crt"),
		TrustedCAFile:  testutils.MustAbsPath("../../fixtures-expired/ca.crt"),
		ClientCertAuth: true,
	}
)
// TestDialTLSExpired tests client with expired certs fails to dial.
func TestDialTLSExpired(t *testing.T) {
	integration.BeforeTest(t)
	// The cluster itself runs with valid TLS material.
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo})
	defer clus.Terminate(t)

	// The client, however, presents expired certificates.
	tls, err := testTLSInfoExpired.ClientConfig()
	require.NoError(t, err)
	// expect remote errors "tls: bad certificate"
	_, err = integration.NewClient(t, clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCURL},
		DialTimeout: 3 * time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()}, //nolint:staticcheck // TODO: remove for a supported version
		TLS:         tls,
	})
	require.Truef(t, clientv3test.IsClientTimeout(err), "expected dial timeout error")
}
// TestDialTLSNoConfig ensures the client fails to dial / times out
// when TLS endpoints (https, unixs) are given but no tls config.
func TestDialTLSNoConfig(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo})
	defer clus.Terminate(t)
	// expect "signed by unknown authority"
	// No TLS configuration is supplied here on purpose.
	c, err := integration.NewClient(t, clientv3.Config{
		Endpoints:   []string{clus.Members[0].GRPCURL},
		DialTimeout: time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()}, //nolint:staticcheck // TODO: remove for a supported version
	})
	// Close the client if one was created despite the expected failure.
	defer func() {
		if c != nil {
			c.Close()
		}
	}()

	require.Truef(t, clientv3test.IsClientTimeout(err), "expected dial timeout error")
}
// TestDialSetEndpointsBeforeFail ensures SetEndpoints can replace unavailable
// endpoints with available ones.
func TestDialSetEndpointsBeforeFail(t *testing.T) {
	// Swap in the replacement endpoint list before the member is stopped.
	testDialSetEndpoints(t, true)
}
// TestDialSetEndpointsAfterFail exercises the same scenario as
// TestDialSetEndpointsBeforeFail, but replaces the endpoint list only after
// the original endpoint has already been stopped.
func TestDialSetEndpointsAfterFail(t *testing.T) {
	testDialSetEndpoints(t, false)
}
// testDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func testDialSetEndpoints(t *testing.T, setBefore bool) {
	integration.BeforeTest(t)
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get endpoint list
	eps := make([]string, 3)
	for i := range eps {
		eps[i] = clus.Members[i].GRPCURL
	}
	// Pick a random member to stop; the client initially only knows that
	// member's endpoint.
	toKill := rand.Intn(len(eps))
	cfg := clientv3.Config{
		Endpoints:   []string{eps[toKill]},
		DialTimeout: 1 * time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()}, //nolint:staticcheck // TODO: remove for a supported version
	}
	cli, err := integration.NewClient(t, cfg)
	require.NoError(t, err)
	defer cli.Close()

	if setBefore {
		// The replacement list still contains the doomed endpoint, plus one
		// healthy neighbour — one live endpoint is enough for the Get below.
		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
	}
	// make a dead node
	clus.Members[toKill].Stop(t)
	clus.WaitLeader(t)
	if !setBefore {
		cli.SetEndpoints(eps[toKill%3], eps[(toKill+1)%3])
	}
	// NOTE(review): fixed sleep presumably gives the balancer time to pick
	// up the endpoint change before the request is issued — confirm.
	time.Sleep(time.Second * 2)
	ctx, cancel := context.WithTimeout(t.Context(), integration.RequestWaitTimeout)
	_, err = cli.Get(ctx, "foo", clientv3.WithSerializable())
	require.NoError(t, err)
	cancel()
}
// TestSwitchSetEndpoints ensures SetEndpoints can switch one endpoint
// with a new one that doesn't include original endpoint.
func TestSwitchSetEndpoints(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// get non partitioned members endpoints
	eps := []string{clus.Members[1].GRPCURL, clus.Members[2].GRPCURL}
	cli := clus.Client(0)
	// Cut member 0 (the client's original endpoint) off from the rest of
	// the cluster, then point the client only at the reachable members.
	clus.Members[0].InjectPartition(t, clus.Members[1:]...)
	cli.SetEndpoints(eps...)

	// A Get must still succeed via the new endpoints despite the partition.
	ctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)
	defer cancel()
	_, err := cli.Get(ctx, "foo")
	require.NoError(t, err)
}
// TestRejectOldCluster verifies that a client configured with
// RejectOldCluster successfully connects to a current-version cluster.
func TestRejectOldCluster(t *testing.T) {
	integration.BeforeTest(t)
	// 2 endpoints to test multi-endpoint Status
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	cli, err := integration.NewClient(t, clientv3.Config{
		Endpoints:        []string{clus.Members[0].GRPCURL, clus.Members[1].GRPCURL},
		DialTimeout:      5 * time.Second,
		DialOptions:      []grpc.DialOption{grpc.WithBlock()}, //nolint:staticcheck // TODO: remove for a supported version
		RejectOldCluster: true,
	})
	require.NoError(t, err)
	cli.Close()
}
// TestDialForeignEndpoint checks an endpoint that is not registered
// with the balancer can be dialed.
func TestDialForeignEndpoint(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	// Dial member 1's endpoint through member 0's client.
	foreignEp := clus.Client(1).Endpoints()[0]
	conn, err := clus.Client(0).Dial(foreignEp)
	require.NoError(t, err)
	defer conn.Close()

	// grpc can return a lazy connection that's not connected yet; confirm
	// that it can communicate with the cluster.
	kvc := clientv3.NewKVFromKVClient(pb.NewKVClient(conn), clus.Client(0))
	ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)
	defer cancel()
	_, gerr := kvc.Get(ctx, "abc")
	require.NoError(t, gerr)
}
// TestSetEndpointAndPut checks that a Put following a SetEndpoints
// to a working endpoint will always succeed.
func TestSetEndpointAndPut(t *testing.T) {
	integration.BeforeTest(t)
	clus := integration.NewCluster(t, &integration.ClusterConfig{Size: 2})
	defer clus.Terminate(t)

	cli := clus.Client(1)
	cli.SetEndpoints(clus.Members[0].GRPCURL)
	if _, err := cli.Put(t.Context(), "foo", "bar"); err != nil {
		// A "closing" error can race with client shutdown; tolerate it.
		if !strings.Contains(err.Error(), "closing") {
			t.Fatal(err)
		}
	}
}
|
go
|
github
|
https://github.com/etcd-io/etcd
|
tests/integration/clientv3/connectivity/dial_test.go
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict
def get_jar_infos(ivy_products, confs=None):
  """Return a mapping from ivy module ref to the paths of its jar resources.

  Note: despite older docs claiming a list, this returns a (default)dict
  keyed by IvyModuleRef.  Inner-dict keys include 'default' (normal jar
  path), 'sources' (path to source jar), and 'javadoc' (path to doc jar).
  None of them are guaranteed to be present, but 'sources' and 'javadoc'
  will never be present if 'default' isn't.

  :param ivy_products: ivy_jar_products data from a context (may be None/empty).
  :param confs: List of key types to return (eg ['default', 'sources']).
    Just returns 'default' if left unspecified.
  :returns: mapping of IvyModuleRef --> {'default' : [<jar_filenames>],
                                         'sources' : [<jar_filenames>],
                                         'javadoc' : [<jar_filenames>]}
  """
  confs = confs or ['default']
  classpath_maps = defaultdict(dict)
  if ivy_products:
    for conf, info_group in ivy_products.items():
      if conf not in confs:
        continue  # We don't care about it.
      for info in info_group:
        for module in info.modules_by_ref.values():
          # Modules without artifacts contribute nothing.
          if module.artifacts:
            classpath_maps[module.ref][conf] = [artifact.path
                                                for artifact in module.artifacts]
  return classpath_maps
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import signal
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import fileutils
from oslo_utils import fixture as utils_fixture
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_utils import versionutils
import six
from six.moves import builtins
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import volume as volume_drivers
# Route all libvirt access in the modules under test through the in-tree
# fakelibvirt implementation so no real libvirtd is required to run tests.
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt

CONF = cfg.CONF
# Pull in config options defined by other modules that these tests read.
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')

# Shortcut for building fake network-info structures in tests.
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_00_1": """
<device>
<name>pci_0000_04_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
# libvirt >= 1.3.0 nodedev-dumpxml
"pci_0000_03_00_0": """
<device>
<name>pci_0000_03_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>0</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
</capability>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
"pci_0000_03_00_1": """
<device>
<name>pci_0000_03_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions' maxCount='16'/>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>""",
}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
    """Scriptable fetch callback used by the cache-concurrency tests.

    Signals the test that it has started, blocks until the test releases
    it via ``wait``, then signals completion via ``done``.  ``target`` and
    ``is_block_dev`` mirror the real fetch-callback signature and are
    ignored.  Note the ``signal`` parameter intentionally shadows the
    stdlib ``signal`` module inside this function.
    """
    signal.send()
    wait.wait()
    done.send()
class FakeVirtDomain(object):
    """In-memory stand-in for a libvirt virDomain handle.

    Holds just enough state (power state, memory info, XML description)
    for the driver code under test; all lifecycle operations are no-ops.
    """

    _DEFAULT_DOM_XML = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        self.uuidstr = str(uuid.uuid4()) if uuidstr is None else uuidstr
        self.id = id
        self.domname = name
        # RUNNING domain with 2GiB max memory and 1234MiB in use; trailing
        # None entries fill the remaining virDomainGetInfo-style slots.
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                      None, None]
        self._fake_dom_xml = fake_xml if fake_xml else self._DEFAULT_DOM_XML

    def name(self):
        if self.domname is not None:
            return self.domname
        return "fake-domain %s" % self

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, flags):
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass

    def isActive(self):
        return True
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Verify image-cache operations serialize per file name but run
    concurrently across different file names."""

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Pretend only the image-cache base dir and the lock dir exist,
            # so the backend never touches the real filesystem layout.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            # No-op stand-in for utils.execute.
            pass

        def fake_extend(image, size, use_cow=False):
            # No-op stand-in for disk.extend.
            pass

        self.stub_out('os.path.exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        # Minimal Instance; only id/uuid are read by the image backend.
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs at a sequentially.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            # Same fname: thread 2 must still be blocked behind thread 1.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        # Once thread 1 finished, thread 2 must have completed too.
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        # Different fnames: thread 2 should finish while thread 1 is still
        # blocked; give the scheduler a few chances to run it.
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """No-op volume driver satisfying the libvirt volume-driver API."""

    def __init__(self, *args, **kwargs):
        # Accept and ignore whatever the driver registry passes in.
        pass

    def attach_volume(self, *args):
        pass

    def detach_volume(self, *args):
        pass

    def get_xml(self, *args):
        # No device XML for the fake volume.
        return ""

    def get_config(self, *args):
        """Connect the volume to a fake device."""
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        for attr in ('source_protocol', 'source_name',
                     'target_dev', 'target_bus'):
            setattr(conf, attr, "fake")
        return conf

    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        pass
class FakeConfigGuestDisk(object):
    """Bare-bones stand-in for LibvirtConfigGuestDisk."""

    def __init__(self, *args, **kwargs):
        # Only the attributes the tests inspect are modelled.
        self.source_type = None
        self.driver_cache = None
class FakeConfigGuest(object):
    """Bare-bones stand-in for a guest config object."""

    def __init__(self, *args, **kwargs):
        # Only driver_cache is ever inspected by the tests.
        self.driver_cache = None
class FakeNodeDevice(object):
    """Fake libvirt node-device object wrapping a canned XML description."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        # *flags* is accepted for API compatibility but ignored.
        return self.xml
def _create_test_instance():
    """Build the baseline instance attribute dict used across these tests.

    Returns a plain dict (not an Instance object) with an attached
    objects.Flavor roughly matching m1.small.
    """
    flavor_kwargs = dict(memory_mb=2048,
                         swap=0,
                         vcpu_weight=None,
                         root_gb=1,
                         id=2,
                         name=u'm1.small',
                         ephemeral_gb=0,
                         rxtx_factor=1.0,
                         flavorid=u'1',
                         vcpus=1,
                         extra_specs={})
    instance = {
        'id': 1,
        'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {
            'image_disk_format': 'raw',
        },
        'flavor': objects.Flavor(**flavor_kwargs),
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
        'task_state': None,
    }
    return instance
class LibvirtConnTestCase(test.NoDBTestCase):
    """Unit tests for the libvirt compute driver (LibvirtDriver)."""

    # NOTE(review): presumably tells the test base class to use real external
    # lock directories because some tests spawn greenthreads — confirm.
    REQUIRES_LOCKING = True

    # Expected default ephemeral disk cache name, e.g. 'ephemeral_20_<hash7>'.
    _EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' %
                             utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7])
    def setUp(self):
        """Wire up config flags, fake libvirt, and the fake image service."""
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        # Keep instance files and snapshots inside a per-test temp dir.
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))

        self.flags(sysinfo_serial="hardware", group="libvirt")

        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        def fake_extend(image, size, use_cow=False):
            # No-op stand-in for disk.extend.
            pass

        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)

        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)

        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.test_image_meta = {
            "disk_format": "raw",
        }
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        # Template for a block-device disk XML snippet; tests format in a
        # concrete {device_path}.
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
def relpath(self, path):
return os.path.relpath(path, CONF.instances_path)
    def tearDown(self):
        """Reset the fake image service so state doesn't leak between tests."""
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
def test_driver_capabilities(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr.capabilities['has_imagecache'],
'Driver capabilities for \'has_imagecache\' '
'is invalid')
self.assertTrue(drvr.capabilities['supports_recreate'],
'Driver capabilities for \'supports_recreate\' '
'is invalid')
self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'],
'Driver capabilities for '
'\'supports_migrate_to_same_host\' is invalid')
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used).

        Any keyword arguments become attributes on the fake connection,
        letting individual tests override specific libvirt entry points.
        """

        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()

        # Creating mocks
        volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver']
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)

        # Point both the driver and the host helper at the fake connection
        # so no real libvirt calls can escape the test.
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
                       lambda x: volume_driver)
        self.stubs.Set(host.Host, 'get_connection', lambda x: fake)
    def fake_lookup(self, instance_name):
        """Stand-in for domain lookup; always returns a fresh fake domain."""
        return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def _create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'disabled': kwargs.get('disabled', False),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return objects.Service(**service_ref)
def _get_pause_flag(self, drvr, network_info, power_on=True,
vifs_already_plugged=False):
timeout = CONF.vif_plugging_timeout
events = []
if (drvr._conn_supports_start_paused and
utils.is_neutron() and
not vifs_already_plugged and
power_on and timeout):
events = drvr._get_neutron_events(network_info)
return bool(events)
def test_public_api_signatures(self):
baseinst = driver.ComputeDriver(None)
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertPublicAPISignatures(baseinst, inst)
    def test_legacy_block_device_info(self):
        """The libvirt driver does not require legacy block device info."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        """init_host() succeeds when the minimum version check passes."""
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        """init_host() raises when the minimum version check fails."""
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_deprecation_warning(self, mock_warning,
mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION)):
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
# Test that a warning is logged if the libvirt version is less than
# the next required minimum version.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
# Skip test if there's no currently planned new min version
if (versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_VERSION)):
self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")
# Test that a warning is not logged if the libvirt version is greater
# than or equal to NEXT_MIN_LIBVIRT_VERSION.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': versionutils.convert_version_to_str(
versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertFalse(version_arg_found)
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu,
                                         mock_arch):
        """init_host() aborts on PPC64 when libvirt is below the
        arch-specific minimum (qemu is new enough)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu,
                                      mock_arch):
        """init_host() aborts on PPC64 when qemu is below the arch-specific
        minimum (libvirt is new enough)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch):
        """init_host() succeeds on PPC64 when both arch-specific minimums
        are met exactly."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu,
                                          mock_arch):
        """init_host() aborts on s390x when libvirt is below the
        arch-specific minimum (qemu is new enough)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu,
                                       mock_arch):
        """init_host() aborts on s390x when qemu is below the arch-specific
        minimum (libvirt is new enough)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch):
        """init_host() succeeds on s390x when both arch-specific minimums
        are met exactly."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    def _do_test_parse_migration_flags(self, lm_config=None, lm_expected=None,
                                       bm_config=None, bm_expected=None):
        """Set migration-flag config strings and verify the parsed bitmasks.

        :param lm_config: live_migration_flag config string, or None to
            leave the option at its default.
        :param lm_expected: expected live-migration flag bitmask, or None
            to skip that assertion.
        :param bm_config: block_migration_flag config string, or None.
        :param bm_expected: expected block-migration flag bitmask, or None.
        """
        if lm_config is not None:
            self.flags(live_migration_flag=lm_config, group='libvirt')
        if bm_config is not None:
            self.flags(block_migration_flag=bm_config, group='libvirt')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._parse_migration_flags()

        if lm_expected is not None:
            self.assertEqual(lm_expected, drvr._live_migration_flags)
        if bm_expected is not None:
            self.assertEqual(bm_expected, drvr._block_migration_flags)
    def test_parse_live_migration_flags_default(self):
        """The default live-migration flag string parses to the matching
        bitwise OR of libvirt flag constants."""
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
    def test_parse_live_migration_flags(self):
        """A non-default (untunnelled) live_migration_flag string parses to
        exactly the listed flag constants.
        """
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
    def test_parse_block_migration_flags_default(self):
        """The default block_migration_flag string parses to the OR of the
        five corresponding libvirt flag constants.
        """
        self._do_test_parse_migration_flags(
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
    def test_parse_block_migration_flags(self):
        """A non-default (untunnelled) block_migration_flag string parses to
        exactly the listed flag constants.
        """
        self._do_test_parse_migration_flags(
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_live_migration_flag_with_invalid_flag(self, mock_log):
        """An unrecognised flag name (VIR_MIGRATE_FOO_BAR) is dropped from
        both parsed masks and a warning is logged for each config option.
        """
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_FOO_BAR'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_FOO_BAR'),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        # One warning per config option that contained the unknown flag.
        msg = mock_log.warning.call_args_list[0]
        self.assertIn("unknown libvirt live migration flag", msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn("unknown libvirt live migration flag", msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_migration_flags_unsafe_block(self, mock_log):
        '''Test if the driver logs a warning if the live_migration_flag
        and/or block_migration_flag config option uses a value which can
        cause potential damage.

        NON_SHARED_INC is removed from the live-migration mask and added
        to the block-migration mask, with a warning for each adjustment.
        '''
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Removing the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Adding the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_migration_flags_p2p_missing(self, mock_log):
        """A missing VIR_MIGRATE_PEER2PEER flag is added to both masks, with
        a warning logged for each config option.
        """
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_migration_flags_p2p_xen(self, mock_log):
        """With virt_type=xen the VIR_MIGRATE_PEER2PEER flag is stripped
        from both masks, with a warning logged for each config option.
        """
        self.flags(virt_type='xen', group='libvirt')
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_parse_migration_flags_config_mgmt(self, mock_log):
        """VIR_MIGRATE_UNDEFINE_SOURCE is forced on and
        VIR_MIGRATE_PERSIST_DEST is forced off for both masks, each
        adjustment producing a warning (four warnings in total).
        """
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_PERSIST_DEST, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED'),
            bm_config=('VIR_MIGRATE_PERSIST_DEST, '
                       'VIR_MIGRATE_PEER2PEER, '
                       'VIR_MIGRATE_LIVE, '
                       'VIR_MIGRATE_TUNNELLED, '
                       'VIR_MIGRATE_NON_SHARED_INC'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0])
        msg = mock_log.warning.call_args_list[2]
        self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0])
        msg = mock_log.warning.call_args_list[3]
        self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_live_migration_tunnelled_true(self, mock_log):
        """live_migration_tunnelled=True adds VIR_MIGRATE_TUNNELLED to both
        masks when the config strings omit it, warning for each option.
        """
        self.flags(live_migration_tunnelled=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE'),
            bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0])
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_live_migration_tunnelled_false(self, mock_log):
        """live_migration_tunnelled=False strips VIR_MIGRATE_TUNNELLED from
        both masks when the config strings include it, warning for each.
        """
        self.flags(live_migration_tunnelled=False, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'),
            bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
                       'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC, '
                       'VIR_MIGRATE_TUNNELLED'),
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
        msg = mock_log.warning.call_args_list[0]
        self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0])
        msg = mock_log.warning.call_args_list[1]
        self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0])
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        """set_admin_password defaults to the "root" user when the image
        declares no os_admin_user and the guest agent is enabled.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with("root", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
        """set_admin_password targets the "Administrator" user when the
        instance os_type is windows.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        """set_admin_password uses the user named by the image's
        os_admin_user property in preference to the default.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")
        mock_guest.set_user_password.assert_called_once_with("foo", "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        """set_admin_password raises SetAdminPasswdNotSupported when the
        host libvirt version is below the required minimum.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        """set_admin_password raises SetAdminPasswdNotSupported for an
        unsupported virt_type.
        """
        self.flags(virt_type='foo', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        """set_admin_password raises QemuGuestAgentNotEnabled when the image
        does not enable the QEMU guest agent.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        """A libvirtError from the guest's set_user_password surfaces as a
        NovaException from set_admin_password.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        # The driver disables the service record.
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # NOTE(review): the service deliberately stays disabled here —
        # presumably because it was disabled by something other than the
        # driver itself, so enabling the host must not override that state.
        # Confirm against the _set_host_enabled implementation.
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # Enabling an already-enabled host is a no-op.
        self.assertFalse(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        # Disabling an already-disabled host leaves it disabled.
        self.assertTrue(svc.disabled)
def test_set_host_enabled_swallows_exceptions(self):
# Tests that set_host_enabled will swallow exceptions coming from the
# db_api code so they don't break anything calling it, e.g. the
# _get_new_connection method.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
# Make db.service_get_by_compute_host raise NovaException; this
# is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException
drvr._set_host_enabled(False)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        """_prepare_pci_devices_for_use succeeds when the node device lookup
        returns a usable device.
        """
        pci_devices = [dict(hypervisor_name='xxx')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        """A libvirtError while detaching a node device surfaces as
        PciDevicePrepareFailed from _prepare_pci_devices_for_use.
        """
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
def test_detach_pci_devices_exception(self):
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid')]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(host.Host,
'has_min_version')
host.Host.has_min_version = lambda x, y: False
self.assertRaises(exception.PciDeviceDetachFailed,
drvr._detach_pci_devices, None, pci_devices)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
    def test_detach_pci_devices(self, mocked_get_xml_desc, *args):
        """_detach_pci_devices completes when the re-fetched domain XML no
        longer contains the hostdev being detached (detach finished).
        """
        # Domain XML containing the hostdev for PCI address 0001:04:10.1.
        fake_domXML1_with_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0001"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>""")
        # Same domain with the hostdev gone; returned by get_xml_desc to
        # simulate a completed detach.
        fake_domXML1_without_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0001' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk></devices></domain>""")
        pci_device_info = {'compute_node_id': 1,
                           'instance_uuid': 'uuid',
                           'address': '0001:04:10.1'}
        pci_device = objects.PciDevice(**pci_device_info)
        pci_devices = [pci_device]
        mocked_get_xml_desc.return_value = fake_domXML1_without_pci
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        dom = fakelibvirt.Domain(
            drvr._get_connection(), fake_domXML1_with_pci, False)
        guest = libvirt_guest.Guest(dom)
        drvr._detach_pci_devices(guest, pci_devices)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc')
    def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, *args):
        """_detach_pci_devices raises PciDeviceDetachFailed when the domain
        XML keeps showing the hostdev (detach never completes).
        """
        # Domain XML that always contains the hostdev for 0001:04:10.1;
        # get_xml_desc returns this same XML, so the detach never appears
        # to finish.
        fake_domXML1_with_pci = (
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0001"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>""")
        pci_device_info = {'compute_node_id': 1,
                           'instance_uuid': 'uuid',
                           'address': '0001:04:10.1'}
        pci_device = objects.PciDevice(**pci_device_info)
        pci_devices = [pci_device]
        mocked_get_xml_desc.return_value = fake_domXML1_with_pci
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        dom = fakelibvirt.Domain(
            drvr._get_connection(), fake_domXML1_with_pci, False)
        guest = libvirt_guest.Guest(dom)
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, guest, pci_devices)
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector(self, fake_get_connector):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
# TODO(walter-boring) add the fake in os-brick
fake_get_connector.return_value = expected
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
@mock.patch.object(connector, 'get_connector_properties')
def test_get_connector_storage_ip(self, fake_get_connector):
ip = '100.100.100.100'
storage_ip = '101.101.101.101'
self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
volume = {
'id': 'fake'
}
expected = {
'ip': storage_ip
}
# TODO(walter-boring) add the fake in os-brick
fake_get_connector.return_value = expected
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertEqual(storage_ip, result['ip'])
    def test_lifecycle_event_registration(self):
        """init_host registers the libvirt error handler before it queries
        host capabilities; the call order is recorded in ``calls``.
        """
        calls = []
        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')
        def fake_get_host_capabilities(**args):
            # Return minimal ARMV7 capabilities so init_host can proceed.
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps
        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")
        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
    def test_sanitize_log_to_xml(self):
        """_get_guest_xml must scrub auth_password values before logging."""
        # setup fake data
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}
        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            # Any log line mentioning auth_password must not leak the
            # plaintext value.
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with test.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        """_get_guest_config builds the expected guest definition: features,
        memory/vcpus, boot device, device list ordering, and the Nova
        instance metadata (owner, flavor, root image, creation time).
        """
        time_mock.return_value = 1234567.89
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"
        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")
        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info,
                                     context=ctxt)
        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        # ACPI and APIC guest features are always configured.
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Devices appear in a fixed order: 3 disks, interface, 2 serials,
        # input, graphics, video, memory balloon.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        # Instance metadata: package/name/creation time, root image,
        # owner (user/project) and flavor details.
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)
        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual(456,
                         cfg.metadata[0].owner.userid)
        self.assertEqual("pie",
                         cfg.metadata[0].owner.username)
        self.assertEqual(123,
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("aubergine",
                         cfg.metadata[0].owner.projectname)
        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
    def test_get_guest_config_lxc(self):
        """_get_guest_config for virt_type=lxc produces an EXE guest with
        filesystem, interface and console devices only.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_lxc_with_id_maps(self):
        """Like test_get_guest_config_lxc, but with uid/gid map config set:
        the guest additionally gets one UID and one GID idmap entry.
        """
        self.flags(virt_type='lxc', group='libvirt')
        self.flags(uid_maps=['0:1000:100'], group='libvirt')
        self.flags(gid_maps=['0:1000:100'], group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual(len(cfg.idmaps), 2)
        self.assertIsInstance(cfg.idmaps[0],
                              vconfig.LibvirtConfigGuestUIDMap)
        self.assertIsInstance(cfg.idmaps[1],
                              vconfig.LibvirtConfigGuestGIDMap)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fits(self, is_able):
        """When the instance fits within the host NUMA topology, no cpuset,
        vcpu pinning or guest NUMA config is generated.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
        """When the instance cannot fit the host NUMA topology, the config
        falls back to the vcpu_pin_set cpuset with no guest NUMA config and
        no random cell choice.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice')
            ) as (get_host_cap_mock,
                  get_vcpu_pin_set_mock, choice_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune, {})
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_large_success(self, mock_version):
        """A 2 MiB instance pagesize on a host advertising 2048 KiB pages
        yields a single hugepages entry bound to guest cell 0.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertEqual(1, len(result.hugepages))
        self.assertEqual(2048, result.hugepages[0].size_kb)
        self.assertEqual([0], result.hugepages[0].nodeset)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_smallest(self, mock_version):
        """Requesting the smallest (4 KiB) page size produces no memory
        backing config at all — the result is None.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertIsNone(result)
def test_get_guest_memory_backing_config_realtime(self):
flavor = {"extra_specs": {
"hw:cpu_realtime": "yes",
"hw:cpu_policy": "dedicated"
}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
membacking = drvr._get_guest_memory_backing_config(
None, None, flavor)
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_pci_no_numa_info(
            self, is_able):
        """A PCI device with no NUMA affinity (numa_node=None) must not
        force a guest NUMA topology: the config falls back to a plain
        cpuset restricted to the vcpu pin set, with no vcpu pinning.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Device assigned to the instance but carrying no NUMA node info.
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=None)
        pci_device = objects.PciDevice(**pci_device_info)
        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device])):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # Only the pin set restricts the guest; no pinning, no NUMA.
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
        """Two PCI devices attached to different host NUMA nodes cannot
        both satisfy a single-node placement: no cell is picked at
        random and no guest NUMA topology is generated.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Two devices pinned to different host NUMA nodes (1 and 0).
        pci_device_info = dict(test_pci_device.fake_db_dev)
        pci_device_info.update(compute_node_id=1,
                               label='fake',
                               status=fields.PciDeviceStatus.AVAILABLE,
                               address='0000:00:00.1',
                               instance_uuid=None,
                               request_id=None,
                               extra_info={},
                               numa_node=1)
        pci_device = objects.PciDevice(**pci_device_info)
        pci_device_info.update(numa_node=0, address='0000:00:00.2')
        pci_device2 = objects.PciDevice(**pci_device_info)
        with test.nested(
                mock.patch.object(
                    host.Host, "get_capabilities", return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice'),
                mock.patch.object(pci_manager, "get_instance_pci_devs",
                                  return_value=[pci_device, pci_device2])
            ) as (get_host_cap_mock,
                  get_vcpu_pin_set_mock, choice_mock, pci_mock):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # random.choice must not be consulted when nothing fits.
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        """Assert that _get_guest_config raises ``exception_class`` for
        an instance with a NUMA topology on the given fake hypervisor.

        :param fake_lib_version: libvirt version (as int) to report
        :param fake_version: hypervisor version (as int) to report
        :param fake_type: hypervisor type string, e.g. host.HV_DRIVER_QEMU
        :param fake_arch: host CPU architecture to report in capabilities
        :param exception_class: exception expected from _get_guest_config
        :param pagesize: page size for the instance NUMA cell, or None

        The trailing ``mock_*`` parameters are appended by the stacked
        ``mock.patch`` decorators (bottom-most decorator first).
        """
        # Single-cell instance topology to trigger the NUMA code paths.
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = self._fake_caps_numa_topology()
        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [],
                          image_meta, disk_info)
def test_get_guest_config_numa_old_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_version_libvirt_ppc(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION_PPC) - 1,
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.PPC64LE,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_bad_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
@mock.patch.object(libvirt_driver.LOG, 'warn')
def test_has_numa_support_bad_version_libvirt_log(self, mock_warn):
# Tests that a warning is logged once and only once when there is a bad
# BAD_LIBVIRT_NUMA_VERSIONS detected.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
with mock.patch.object(drvr._host, 'has_version', return_value=True):
for i in range(2):
self.assertFalse(drvr._has_numa_support())
self.assertTrue(drvr._bad_libvirt_numa_version_warn)
self.assertEqual(1, mock_warn.call_count)
# assert the version is logged properly
self.assertEqual('1.2.9.2', mock_warn.call_args[0][1])
def test_get_guest_config_numa_old_version_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_other_arch_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.S390,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
versionutils.convert_version_to_int((4, 5, 0)),
'XEN',
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_pages_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.MemoryPagesUnsupported,
2048)
def test_get_guest_config_numa_old_pages_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
2048)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
            self, is_able):
        """An instance without its own NUMA topology that fits the host,
        with vcpu_pin_set limiting usable CPUs, gets its cpuset
        restricted to the pin set and no vcpu pinning or guest NUMA.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
        ) as (has_min_version_mock, get_host_cap_mock,
              get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
        """An instance NUMA topology on a host that reports no NUMA
        topology still yields guest <numa> cells, but no numatune and
        no vcpu pinning.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024),
                   objects.InstanceNUMACell(
                       id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # Host reports no NUMA topology at all.
        caps.host.topology = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            # Without host NUMA info there is nothing to tune memory on.
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Guest cells mirror the instance cells verbatim.
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
    @mock.patch.object(
        host.Host, "is_cpu_control_policy_capable", return_value=True)
    def test_get_guest_config_numa_host_instance_topo(self, is_able):
        """A two-cell instance topology placed on a NUMA host produces
        per-vcpu pinning limited to the vcpu_pin_set, an emulatorpin
        covering all allowed CPUs, guest <numa> cells renumbered from 0
        and a strict numatune mapping each guest cell to its host node.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                   objects.InstanceNUMACell(
                       id=2, cpuset=set([2, 3]), memory=1024,
                       pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
        ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
            # Guest cells are renumbered 0..n regardless of instance ids.
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        """Instance cells listed out of host-node order (ids 3 then 0)
        are still mapped correctly: pinning follows each cell's host
        node and guest cell ids are renumbered sequentially.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
        ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                # No page size requested, so no memAccess on the cell.
                self.assertIsNone(numa_cfg_cell.memAccess)
            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
        """Explicit per-vCPU pinning from the instance topology is
        translated one-to-one into <vcpupin> entries, with the emulator
        pinned to the union of all pinned host CPUs.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024,
                cpu_pinning={0: 24, 1: 25}),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024,
                       cpu_pinning={2: 0, 3: 1})])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # Larger host topology so the pinned host CPUs (up to 25) exist.
        caps.host.topology = self._fake_caps_numa_topology(
            sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
        ):
            cfg = conn._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 24, 25]),
                             cfg.cputune.emulatorpin.cpuset)
            for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells, cfg.cpu.numa.cells)):
                self.assertEqual(i, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertIsNone(numa_cfg_cell.memAccess)
            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for i, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells, cfg.numatune.memnodes)):
                self.assertEqual(i, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_mempages_shared(self):
        """Hugepage-backed (pagesize=2048) cells get memAccess="shared"
        on each guest NUMA cell plus strict numatune; no realtime vcpu
        scheduler is configured.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
        ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                # Hugepage-backed cells expose shared memory access.
                self.assertEqual("shared", numa_cfg_cell.memAccess)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
            # No realtime extra specs -> no vcpu scheduler entries.
            self.assertEqual(0, len(cfg.cputune.vcpusched))
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
    def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
        """With hw:cpu_realtime and a mask excluding vCPUs 0-1, vCPUs
        2-3 get a fifo realtime scheduler while the emulator is pinned
        to CPUs 0-1.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[
                objects.InstanceNUMACell(
                    id=1, cpuset=set([0, 1]),
                    memory=1024, pagesize=2048),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]),
                    memory=1024, pagesize=2048)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={
                                    "hw:cpu_realtime": "yes",
                                    "hw:cpu_policy": "dedicated",
                                    "hw:cpu_realtime_mask": "^0-1"
                                })
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
        ):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
                self.assertEqual("shared", numa_cfg_cell.memAccess)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
            # vCPUs 2-3 (not excluded by ^0-1) run under the fifo
            # realtime scheduler; the emulator stays on CPUs 0-1.
            self.assertEqual(1, len(cfg.cputune.vcpusched))
            self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
            self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset)
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology, True)
self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
self.assertEqual(0, conf.cells[0].id)
self.assertEqual(set([1, 2]), conf.cells[0].cpus)
self.assertEqual(131072, conf.cells[0].memory)
self.assertEqual("shared", conf.cells[0].memAccess)
self.assertEqual(1, conf.cells[1].id)
self.assertEqual(set([3, 4]), conf.cells[1].cpus)
self.assertEqual(131072, conf.cells[1].memory)
self.assertEqual("shared", conf.cells[1].memAccess)
def test_get_cpu_numa_config_from_instance_none(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(None, False)
self.assertIsNone(conf)
@mock.patch.object(host.Host, 'has_version', return_value=True)
def test_has_cpu_policy_support(self, mock_has_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.CPUPinningNotSupported,
drvr._has_cpu_policy_support)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_not_want_hugepages(self, mock_caps, mock_numa, mock_hp):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=4),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=4)])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
mock_caps.return_value = caps
host_topology = drvr._get_host_numa_topology()
self.assertFalse(drvr._wants_hugepages(None, None))
self.assertFalse(drvr._wants_hugepages(host_topology, None))
self.assertFalse(drvr._wants_hugepages(None, instance_topology))
self.assertFalse(drvr._wants_hugepages(host_topology,
instance_topology))
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
return_value=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support",
return_value=True)
@mock.patch.object(host.Host, "get_capabilities")
def test_does_want_hugepages(self, mock_caps, mock_numa, mock_hp):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_topology = objects.InstanceNUMATopology(
cells=[
objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]),
memory=1024, pagesize=2048),
objects.InstanceNUMACell(
id=2, cpuset=set([2, 3]),
memory=1024, pagesize=2048)])
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
mock_caps.return_value = caps
host_topology = drvr._get_host_numa_topology()
self.assertTrue(drvr._wants_hugepages(host_topology,
instance_topology))
    def test_get_guest_config_clock(self):
        """The guest clock is UTC with pit(delay) and rtc(catchup)
        timers; a disabled hpet timer is added only for the
        architectures mapped to True in hpet_map.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # Which guest architectures are expected to carry an hpet timer.
        hpet_map = {
            arch.X86_64: True,
            arch.I686: True,
            arch.PPC: False,
            arch.PPC64: False,
            arch.ARMV7: False,
            arch.AARCH64: False,
            }
        for guestarch, expect_hpet in hpet_map.items():
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                             image_meta,
                                             disk_info)
                self.assertIsInstance(cfg.clock,
                                      vconfig.LibvirtConfigGuestClock)
                self.assertEqual(cfg.clock.offset, "utc")
                self.assertIsInstance(cfg.clock.timers[0],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertIsInstance(cfg.clock.timers[1],
                                      vconfig.LibvirtConfigGuestTimer)
                self.assertEqual(cfg.clock.timers[0].name, "pit")
                self.assertEqual(cfg.clock.timers[0].tickpolicy,
                                      "delay")
                self.assertEqual(cfg.clock.timers[1].name, "rtc")
                self.assertEqual(cfg.clock.timers[1].tickpolicy,
                                      "catchup")
                if expect_hpet:
                    self.assertEqual(3, len(cfg.clock.timers))
                    self.assertIsInstance(cfg.clock.timers[2],
                                          vconfig.LibvirtConfigGuestTimer)
                    self.assertEqual('hpet', cfg.clock.timers[2].name)
                    # hpet appears in the XML but explicitly disabled.
                    self.assertFalse(cfg.clock.timers[2].present)
                else:
                    self.assertEqual(2, len(cfg.clock.timers))
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows(self, mock_version, mock_get_arch):
mock_version.return_value = False
mock_get_arch.return_value = arch.I686
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self, 1),
image_meta, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
self.assertEqual("pit", cfg.clock.timers[0].name)
self.assertEqual("rtc", cfg.clock.timers[1].name)
self.assertEqual("hpet", cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version,
                                            mock_get_arch):
        """With a new enough libvirt/qemu, a Windows guest additionally
        gets a present hypervclock timer (fourth timer) plus the
        ACPI/APIC/HyperV feature flags.
        """
        # has_min_version True -> the hypervclock timer is added.
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
        self.assertTrue(cfg.clock.timers[3].present)
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
        """When only the minimal version pair (libvirt 1.0.0 / qemu
        1.1.0) is reported available, the HyperV feature is enabled with
        relaxed timing only — spinlocks and vapic stay off.
        """
        def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Satisfy exactly the relaxed-timing version check and
            # nothing newer.
            if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
                return True
            return False
        mock_version.side_effect = fake_version
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertFalse(cfg.features[2].spinlocks)
        self.assertFalse(cfg.features[2].vapic)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
        """When every version check passes, the HyperV feature enables
        relaxed timing, spinlocks (8191 retries) and vapic.
        """
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertTrue(cfg.features[2].spinlocks)
        self.assertEqual(8191, cfg.features[2].spinlock_retries)
        self.assertTrue(cfg.features[2].vapic)
    def test_get_guest_config_with_two_nics(self):
        """Two network interfaces yield two GuestInterface devices in the
        expected device layout (10 devices total).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 2),
                                     image_meta, disk_info)
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Devices: 2 disks, 2 interfaces, 2 serials, input, graphics,
        # video, memory balloon.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_bug_1118829(self):
        """Regression test for bug 1118829: building the guest config
        with an empty block_device_info must still set the instance's
        root_device_name from the disk mapping.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        disk_info = {'disk_bus': 'virtio',
                     'cdrom_bus': 'ide',
                     'mapping': {u'vda': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': u'vda'},
                                 'root': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': 'vda'}}}
        # NOTE(jdg): For this specific test leave this blank
        # This will exercise the failed code path still,
        # and won't require fakes and stubs of the iscsi discovery
        block_device_info = {}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        drvr._get_guest_config(instance_ref, [], image_meta, disk_info,
                               None, block_device_info)
        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
    def test_get_guest_config_with_root_device_name(self):
        """A root_device_name in block_device_info is propagated to the
        UML guest's os_root setting.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        block_device_info = {'root_device_name': '/dev/vdb'}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info,
                                     None, block_device_info)
        self.assertEqual(0, len(cfg.features))
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, "uml")
        self.assertEqual(cfg.os_boot_dev, [])
        self.assertEqual(cfg.os_root, '/dev/vdb')
        self.assertEqual(len(cfg.devices), 3)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
def test_has_uefi_support_with_invalid_version(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr._host,
'has_min_version', return_value=False):
self.assertFalse(drvr._has_uefi_support())
def test_has_uefi_support_not_supported_arch(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "alpha"
self.assertFalse(drvr._has_uefi_support())
@mock.patch('os.path.exists', return_value=False)
def test_has_uefi_support_with_no_loader_existed(self, mock_exist):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(drvr._has_uefi_support())
@mock.patch('os.path.exists', return_value=True)
def test_has_uefi_support(self, mock_has_version):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
with mock.patch.object(drvr._host,
'has_min_version', return_value=True):
self.assertTrue(drvr._has_uefi_support())
    def test_get_guest_config_with_uefi(self):
        """An image requesting hw_firmware_type=uefi produces a guest
        config whose os_loader_type is "pflash" (UEFI firmware).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_firmware_type": "uefi"}})
        instance_ref = objects.Instance(**self.test_instance)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with test.nested(
                mock.patch.object(drvr, "_has_uefi_support",
                                  return_value=True)):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info)
            self.assertEqual(cfg.os_loader_type, "pflash")
    def test_get_guest_config_with_block_device(self):
        """Volume BDMs with explicit device names show up as guest disks
        with the matching target devices (vdc, vdd) and are saved.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdc'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/vdd'}),
            ]
        )
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            bdms
        )}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_lxc_with_attached_volume(self):
        """For LXC, non-boot attached volumes appear as guest disks at
        their mount devices (vdc, vdd); the boot volume (boot_index 0)
        does not get its own extra disk entry.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'boot_index': 0}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     }),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 3,
                     'source_type': 'volume', 'destination_type': 'volume',
                     }),
            ]
        )
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            bdms
        )}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_with_configdrive(self):
        """The config drive is attached as the last available device:
        "hdd" on x86 (ide) and "sdz" on power (scsi only).
        """
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz
        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)
    def test_get_guest_config_with_virtio_scsi_bus(self):
        """An image requesting hw_scsi_model=virtio-scsi adds a
        virtio-scsi controller device after the disks.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            [])
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        """Volume BDMs on the scsi bus are attached as scsi disks (sdc,
        sdd) and a virtio-scsi controller is added after them.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_scsi_model": "virtio-scsi"}})
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bdms = block_device_obj.block_device_make_list_from_dicts(
            self.context, [
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 2,
                     'source_type': 'volume', 'destination_type': 'volume',
                     'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
            ]
        )
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes(bdms)}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
    def test_get_guest_config_with_vnc(self):
        """VNC enabled without a USB tablet and without SPICE yields a
        7-device layout with a "vnc" graphics device.
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "vnc")
    def test_get_guest_config_with_vnc_and_tablet(self):
        """VNC plus use_usb_tablet adds a tablet input device before the
        "vnc" graphics device (8 devices total).
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=False, group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
    def test_get_guest_config_with_spice_and_tablet(self):
        """SPICE without the agent keeps the USB tablet input device and
        uses a "spice" graphics device.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=False,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "spice")
    def test_get_guest_config_with_spice_and_agent(self):
        """SPICE with the agent replaces the tablet with the SPICE agent
        channel and selects the qxl video model.
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
                                                         mock_get_port_number,
                                                         mock_acquire_port):
        """Serial console devices are GuestSerial on x86_64 but
        GuestConsole on s390/s390x; either way the device type is "tcp".
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
                    arch.S390: vconfig.LibvirtConfigGuestConsole,
                    arch.S390X: vconfig.LibvirtConfigGuestConsole}
        for guest_arch, device_type in expected.items():
            mock_get_arch.return_value = guest_arch
            guest = vconfig.LibvirtConfigGuest()
            drvr._create_serial_console_devices(guest, instance=None,
                                                flavor={}, image_meta={})
            self.assertEqual(1, len(guest.devices))
            console_device = guest.devices[0]
            self.assertIsInstance(console_device, device_type)
            self.assertEqual("tcp", console_device.type)
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console(self, acquire_port):
        """With serial_console enabled, the first serial device is a tcp
        console listening on the acquired port (11111).
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        acquire_port.return_value = 11111
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(8, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual(11111, cfg.devices[2].listen_port)
    def test_get_guest_config_serial_console_through_flavor(self):
        """The hw:serial_port_count flavor extra spec controls how many
        tcp serial devices are created (three here).
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(10, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
    def test_get_guest_config_serial_console_invalid_flavor(self):
        """A non-numeric hw:serial_port_count extra spec raises
        ImageSerialPortNumberInvalid.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(
            exception.ImageSerialPortNumberInvalid,
            drvr._get_guest_config, instance_ref, [],
            image_meta, disk_info)
    def test_get_guest_config_serial_console_image_and_flavor(self):
        """When both the image (3) and the flavor (4) specify a serial
        port count, three ports result — the smaller of the two values
        in this layout (10 devices total).
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = objects.ImageMeta.from_dict({
            "disk_format": "raw",
            "properties": {"hw_serial_port_count": "3"}})
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                     disk_info)
        self.assertEqual(10, len(cfg.devices), cfg.devices)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("tcp", cfg.devices[2].type)
        self.assertEqual("tcp", cfg.devices[3].type)
        self.assertEqual("tcp", cfg.devices[4].type)
    @mock.patch('nova.console.serial.acquire_port')
    def test_get_guest_config_serial_console_through_port_rng_exhausted(
            self, acquire_port):
        """Port-range exhaustion while acquiring a serial console port
        propagates as SocketPortRangeExhaustedException.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
            '127.0.0.1')
        self.assertRaises(
            exception.SocketPortRangeExhaustedException,
            drvr._get_guest_config, instance_ref, [],
            image_meta, disk_info)
    @mock.patch('nova.console.serial.release_port')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_get_serial_ports_from_guest')
    def test_serial_console_release_port(
            self, mock_get_serial_ports_from_guest, mock_get_guest,
            mock_get_info, mock_release_port):
        """Destroying an instance releases every serial console port
        bound to its guest.
        """
        self.flags(enabled="True", group='serial_console')
        guest = libvirt_guest.Guest(FakeVirtDomain())
        guest.power_off = mock.Mock()
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)
        mock_get_guest.return_value = guest
        mock_get_serial_ports_from_guest.return_value = iter([
            ('127.0.0.1', 10000), ('127.0.0.1', 10001)])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._destroy(objects.Instance(**self.test_instance))
        mock_release_port.assert_has_calls(
            [mock.call(host='127.0.0.1', port=10000),
             mock.call(host='127.0.0.1', port=10001)])
@mock.patch('os.path.getsize', return_value=0) # size doesn't matter
@mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size',
return_value='fake-size')
def test_detach_encrypted_volumes(self, mock_getsize,
mock_get_volume_size):
"""Test that unencrypted volumes are not disconnected with dmcrypt."""
instance = objects.Instance(**self.test_instance)
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<driver name='fake-driver' type='fake-type' />
<source file='filename'/>
<target dev='vdc' bus='virtio'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/disk'/>
<target dev='vda'/>
</disk>
<disk type='block' device='disk'>
<driver name='fake-driver' type='fake-type' />
<source dev='/dev/mapper/swap'/>
<target dev='vdb'/>
</disk>
</devices>
</domain>
"""
dom = FakeVirtDomain(fake_xml=xml)
instance.ephemeral_key_uuid = 'fake-id' # encrypted
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@mock.patch.object(dmcrypt, 'delete_volume')
@mock.patch.object(conn._host, 'get_domain', return_value=dom)
def detach_encrypted_volumes(block_device_info, mock_get_domain,
mock_delete_volume):
conn._detach_encrypted_volumes(instance, block_device_info)
mock_get_domain.assert_called_once_with(instance)
self.assertFalse(mock_delete_volume.called)
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
'block_device_mapping': []}
detach_encrypted_volumes(block_device_info)
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('bind',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 101),
('127.0.0.2', 100)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_connect_only(self,
mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest('connect',
mock_get_xml_desc)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc):
i = self._test_get_serial_ports_from_guest(None,
mock_get_xml_desc,
'console')
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
    def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc,
                                          dev_name='serial'):
        """Helper: stub the domain XML with four tcp *dev_name* devices
        (two "bind", two "connect") and return the driver's iterator of
        (host, port) pairs for the given *mode* filter (None = all).
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}
        mock_get_xml_desc.return_value = xml
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        return drvr._get_serial_ports_from_guest(guest, mode=mode)
    def test_get_guest_config_with_type_xen(self):
        """A Xen guest gets a console instead of serial devices and the
        "xen" video model with VNC graphics (6 devices total).
        """
        self.flags(enabled=True, group='vnc')
        self.flags(virt_type='xen',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 6)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[3].type, "vnc")
        self.assertEqual(cfg.devices[4].type, "xen")
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       return_value=arch.S390X)
    def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
        """A KVM guest on s390x uses the s390-ccw-virtio machine type and
        gets two sclp consoles: a file-backed log (sclplm) and a pty
        terminal (sclp).
        """
        self.flags(enabled=False, group='vnc')
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self._stub_host_capabilities_cpu_arch(arch.S390X)
        instance_ref = objects.Instance(**self.test_instance)
        cfg = self._get_guest_config_via_fake_api(instance_ref)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        log_file_device = cfg.devices[2]
        self.assertIsInstance(log_file_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclplm", log_file_device.target_type)
        self.assertEqual("file", log_file_device.type)
        terminal_device = cfg.devices[3]
        self.assertIsInstance(terminal_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclp", terminal_device.target_type)
        self.assertEqual("pty", terminal_device.type)
        self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
return drvr._get_guest_config(instance, [],
image_meta, disk_info)
def test_get_guest_config_with_type_xen_pae_hvm(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['vm_mode'] = vm_mode.HVM
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(cfg.os_type, vm_mode.XEN)
self.assertEqual(1, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(enabled=True, group='vnc')
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
def test_get_guest_config_with_watchdog_action_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "none"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
agent_enabled=False):
self.flags(enabled=vnc_enabled, group='vnc')
self.flags(enabled=spice_enabled,
agent_enabled=agent_enabled, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
return drvr._get_guest_usb_tablet(os_type)
def test_get_guest_usb_tablet_wipe(self):
self.flags(use_usb_tablet=True, group='libvirt')
tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, True, "foo")
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, vm_mode.HVM, True)
self.assertIsNone(tablet)
def _test_get_guest_config_with_watchdog_action_flavor(self,
hw_watchdog_action="hw:watchdog_action"):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
    def test_get_guest_config_with_watchdog_action_through_flavor(self):
        """The scoped 'hw:watchdog_action' flavor extra spec is honoured."""
        self._test_get_guest_config_with_watchdog_action_flavor()
    # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
    # should be removed in the next release
    def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
            self):
        """The legacy unscoped 'hw_watchdog_action' extra spec still works."""
        self._test_get_guest_config_with_watchdog_action_flavor(
            hw_watchdog_action="hw_watchdog_action")
def test_get_guest_config_with_watchdog_overrides_flavor(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_watchdog_action": "pause"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("pause", cfg.devices[7].action)
def test_get_guest_config_with_video_driver_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "vmvga"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[6].type, "vmvga")
def test_get_guest_config_with_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "yes"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[7].type, "unix")
self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_with_video_driver_vram(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, "qxl")
self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        # setup_container reports the container device so the rollback path
        # has something to tear down
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        # the failure under test: defining the domain XML blows up
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with test.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            # the ValueError must propagate...
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None, None)
            # ...but the container rootfs must still have been unmounted
            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
def test_video_driver_flavor_limit_not_set(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_video_driver_ram_above_flavor_limit(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
instance_type = instance_ref.get_flavor()
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_qemu_guest_agent": "no"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_rng_device(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
def test_get_guest_config_with_rng_not_allowed(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_with_rng_limits(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
'hw_rng:rate_bytes': '1024',
'hw_rng:rate_period': '2'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertEqual(cfg.devices[6].rate_bytes, 1024)
self.assertEqual(cfg.devices[6].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_backend(self, mock_path):
self.flags(virt_type='kvm',
use_usb_tablet=False,
rng_dev_path='/dev/hw_rng',
group='libvirt')
mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
self.flags(virt_type='kvm',
use_usb_tablet=False,
rng_dev_path='/dev/hw_rng',
group='libvirt')
mock_path.return_value = False
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
"properties": {"hw_rng_model": "virtio"}})
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(exception.RngDeviceNotExist,
drvr._get_guest_config,
instance_ref,
[],
image_meta, disk_info)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.vcpus = 4
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(4096, cfg.cputune.shares)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_with_cpu_quota(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(10000, cfg.cputune.shares)
self.assertEqual(20000, cfg.cputune.period)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=True)
def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
'quota:cpu_period': '20000'}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(ValueError,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
@mock.patch.object(
host.Host, "is_cpu_control_policy_capable", return_value=False)
def test_get_update_guest_cputune(self, is_able):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
'quota:cpu_period': '20000'}
self.assertRaises(
exception.UnsupportedHostCPUControlPolicy,
drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm")
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config_sysinfo(instance_ref)
self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
self.assertEqual(version.vendor_string(),
cfg.system_manufacturer)
self.assertEqual(version.product_string(),
cfg.system_product)
self.assertEqual(version.version_string_with_package(),
cfg.system_version)
self.assertEqual(expected_serial,
cfg.system_serial)
self.assertEqual(instance_ref['uuid'],
cfg.system_uuid)
self.assertEqual("Virtual Machine",
cfg.system_family)
    def test_get_guest_config_sysinfo_serial_none(self):
        """sysinfo_serial='none' leaves the system serial unset."""
        self.flags(sysinfo_serial="none", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(None)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_host_sysinfo_serial_hardware")
def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
self.flags(sysinfo_serial="hardware", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
mock_uuid.return_value = theuuid
self._test_get_guest_config_sysinfo_serial(theuuid)
@contextlib.contextmanager
def patch_exists(self, result):
real_exists = os.path.exists
def fake_exists(filename):
if filename == "/etc/machine-id":
return result
return real_exists(filename)
with mock.patch.object(os.path, "exists") as mock_exists:
mock_exists.side_effect = fake_exists
yield mock_exists
def test_get_guest_config_sysinfo_serial_os(self):
self.flags(sysinfo_serial="os", group="libvirt")
theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
with test.nested(
mock.patch.object(six.moves.builtins, "open",
mock.mock_open(read_data=theuuid)),
self.patch_exists(True)):
self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self):
self.flags(sysinfo_serial="os", group="libvirt")
with test.nested(
mock.patch.object(six.moves.builtins, "open",
mock.mock_open(read_data="")),
self.patch_exists(True)):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self):
self.flags(sysinfo_serial="os", group="libvirt")
with self.patch_exists(False):
self.assertRaises(exception.NovaException,
self._test_get_guest_config_sysinfo_serial,
None)
    def test_get_guest_config_sysinfo_serial_auto_hardware(self):
        """sysinfo_serial='auto' falls back to the hardware UUID when
        /etc/machine-id does not exist.
        """
        self.flags(sysinfo_serial="auto", group="libvirt")
        real_exists = os.path.exists
        with test.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_get_host_sysinfo_serial_hardware")
        ) as (mock_exists, mock_uuid):
            # only /etc/machine-id is faked as missing; other paths keep
            # their real behaviour
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return False
                return real_exists(filename)
            mock_exists.side_effect = fake_exists
            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            mock_uuid.return_value = theuuid
            self._test_get_guest_config_sysinfo_serial(theuuid)
    def test_get_guest_config_sysinfo_serial_auto_os(self):
        """sysinfo_serial='auto' prefers /etc/machine-id when it exists."""
        self.flags(sysinfo_serial="auto", group="libvirt")
        real_exists = os.path.exists
        real_open = builtins.open
        with test.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(builtins, "open"),
        ) as (mock_exists, mock_open):
            # /etc/machine-id is faked as present; other paths keep their
            # real behaviour
            def fake_exists(filename):
                if filename == "/etc/machine-id":
                    return True
                return real_exists(filename)
            mock_exists.side_effect = fake_exists
            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            # opening /etc/machine-id yields a fake file whose read()
            # returns the uuid; any other open goes to the real builtin
            def fake_open(filename, *args, **kwargs):
                if filename == "/etc/machine-id":
                    h = mock.MagicMock()
                    h.read.return_value = theuuid
                    h.__enter__.return_value = h
                    return h
                return real_open(filename, *args, **kwargs)
            mock_open.side_effect = fake_open
            self._test_get_guest_config_sysinfo_serial(theuuid)
def _create_fake_service_compute(self):
service_info = {
'id': 1729,
'host': 'fake',
'report_count': 0
}
service_ref = objects.Service(**service_info)
compute_info = {
'id': 1729,
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 2048,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'free_ram_mb': 1024,
'free_disk_gb': 2048,
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'running_vms': 0,
'cpu_info': '',
'current_workload': 0,
'service_id': service_ref['id'],
'host': service_ref['host']
}
compute_ref = objects.ComputeNode(**compute_info)
return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
self.flags(virt_type='kvm', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.1',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [],
image_meta, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'yes')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "1")
self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
    """A Xen guest config exposes an allocated PCI device as an
    unmanaged 'subsystem' hostdev carrying the device's PCI address.
    """
    self.flags(virt_type='xen', group='libvirt')
    service_ref, compute_ref = self._create_fake_service_compute()

    instance = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status=fields.PciDeviceStatus.ALLOCATED,
                           address='0000:00:00.2',
                           compute_id=compute_ref['id'],
                           instance_uuid=instance.uuid,
                           request_id=None,
                           extra_info={})
    pci_device = objects.PciDevice(**pci_device_info)
    pci_list = objects.PciDeviceList()
    pci_list.objects.append(pci_device)
    instance.pci_devices = pci_list

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    cfg = drvr._get_guest_config(instance, [],
                                 image_meta, disk_info)

    had_pci = 0
    # care only about the PCI devices
    for dev in cfg.devices:
        # Fix: isinstance() is the idiomatic type check; type(x) == T
        # breaks for subclasses and is flagged by linters.
        if isinstance(dev, vconfig.LibvirtConfigGuestHostdevPCI):
            had_pci += 1
            self.assertEqual(dev.type, 'pci')
            # unlike KVM, Xen hostdevs are not libvirt-managed
            self.assertEqual(dev.managed, 'no')
            self.assertEqual(dev.mode, 'subsystem')

            self.assertEqual(dev.domain, "0000")
            self.assertEqual(dev.bus, "00")
            self.assertEqual(dev.slot, "00")
            self.assertEqual(dev.function, "2")
    self.assertEqual(had_pci, 1)
def test_get_guest_config_os_command_line_through_image_meta(self):
    """The os_command_line image property becomes the guest kernel
    cmdline when the instance boots an external kernel (kernel_id set).
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')
    # Mutate the shared fixture *before* creating the Instance object.
    self.test_instance['kernel_id'] = "fake_kernel_id"
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"os_command_line":
                       "fake_os_command_line"}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
def test_get_guest_config_os_command_line_without_kernel_id(self):
    """Without an external kernel, os_command_line is ignored and the
    guest config carries no kernel cmdline.
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"os_command_line":
                       "fake_os_command_line"}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertIsNone(cfg.os_cmdline)
def test_get_guest_config_os_command_empty(self):
    """An empty os_command_line property falls back to the default
    cmdline instead of producing an empty one.
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')
    # Mutate the shared fixture *before* creating the Instance object.
    self.test_instance['kernel_id'] = "fake_kernel_id"
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"os_command_line": ""}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
    # default, so testing an empty string and None value in the
    # os_command_line image property must pass
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertNotEqual(cfg.os_cmdline, "")
def test_get_guest_config_armv7(self):
    """An ARMv7 host defaults the guest machine type to vexpress-a15."""
    def get_host_capabilities_stub(self):
        # Fake host capabilities reporting an ARMv7 host CPU.
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.arch = arch.ARMV7

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        return caps

    self.flags(virt_type="kvm",
               group="libvirt")

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    # Stub must be installed before the driver queries host capabilities.
    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_mach_type, "vexpress-a15")
def test_get_guest_config_aarch64(self):
    """An AArch64 host defaults the guest machine type to 'virt'."""
    def get_host_capabilities_stub(self):
        # Fake host capabilities reporting an AArch64 host CPU.
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.arch = arch.AARCH64

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        return caps

    self.flags(virt_type="kvm",
               group="libvirt")

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    # Stub must be installed before the driver queries host capabilities.
    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_mach_type, "virt")
def test_get_guest_config_machine_type_s390(self):
    """On s390/s390x hosts the machine type is s390-ccw-virtio."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    for cpu_arch in (arch.S390, arch.S390X):
        caps.host.cpu.arch = cpu_arch
        self.assertEqual('s390-ccw-virtio',
                         drvr._get_machine_type(image_meta, caps))
def test_get_guest_config_machine_type_through_image_meta(self):
    """The hw_machine_type image property overrides the machine type."""
    self.flags(virt_type="kvm",
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict({
        "disk_format": "raw",
        "properties": {"hw_machine_type":
                       "fake_machine_type"}})

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def test_get_guest_config_machine_type_from_config(self):
    """The per-arch hw_machine_type config option ('arch=type' pairs)
    selects the machine type when the image does not override it.
    """
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(hw_machine_type=['x86_64=fake_machine_type'],
               group='libvirt')

    def fake_getCapabilities():
        # Minimal capabilities XML reporting an x86_64 host so the
        # config option's x86_64 entry matches.
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                    <arch>x86_64</arch>
                    <model>Penryn</model>
                    <vendor>Intel</vendor>
                    <topology sockets='1' cores='2' threads='1'/>
                    <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Penryn</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='xtpr'/>
                  </cpu>
               """

    # Make sure the host arch is mocked as x86_64
    self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
                                  baselineCPU=fake_baselineCPU,
                                  getVersion=lambda: 1005001)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def _test_get_guest_config_ppc64(self, device_index):
    """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.

    On ppc64/ppc guests the video device must be VGA; ``device_index``
    is where the caller expects it in cfg.devices (the index shifts
    with the enabled graphics/console options).
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    expected = (arch.PPC64, arch.PPC)
    for guestarch in expected:
        # Patch the guest arch detection for each PowerPC variant.
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'get_arch',
                               return_value=guestarch):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta,
                                         disk_info)
            self.assertIsInstance(cfg.devices[device_index],
                                  vconfig.LibvirtConfigGuestVideo)
            self.assertEqual(cfg.devices[device_index].type, 'vga')
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
    """With VNC enabled, the VGA video device sits at index 6."""
    self.flags(enabled=True, group='vnc')
    self._test_get_guest_config_ppc64(6)
def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
    """With SPICE (plus agent) enabled, the VGA device sits at index 8."""
    self.flags(enabled=True, agent_enabled=True, group='spice')
    self._test_get_guest_config_ppc64(8)
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
    """Assert the guest config enables the boot menu for the given
    image metadata / flavor extra specs combination.
    """
    self.flags(virt_type='kvm', group='libvirt')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = extra_specs
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
    self.assertTrue(conf.os_bootmenu)
def test_get_guest_config_bootmenu_via_image_meta(self):
    """The hw_boot_menu image property enables the boot menu."""
    meta = objects.ImageMeta.from_dict(
        {"disk_format": "raw", "properties": {"hw_boot_menu": "True"}})
    self._test_get_guest_config_bootmenu(meta, {})
def test_get_guest_config_bootmenu_via_extra_specs(self):
    """The hw:boot_menu flavor extra spec enables the boot menu."""
    meta = objects.ImageMeta.from_dict(self.test_image_meta)
    self._test_get_guest_config_bootmenu(meta, {'hw:boot_menu': 'True'})
def test_get_guest_cpu_config_none(self):
    """cpu_mode=none yields a topology-only CPU element (no mode/model)."""
    self.flags(cpu_mode="none", group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertIsNone(guest.cpu.mode)
    self.assertIsNone(guest.cpu.model)
    self.assertEqual(1, guest.cpu.sockets)
    self.assertEqual(1, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_cpu_config_default_kvm(self):
    """KVM with no explicit cpu_mode defaults to host-model."""
    self.flags(virt_type="kvm", cpu_mode=None, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertEqual("host-model", guest.cpu.mode)
    self.assertIsNone(guest.cpu.model)
    self.assertEqual(1, guest.cpu.sockets)
    self.assertEqual(1, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_cpu_config_default_uml(self):
    """UML guests get no CPU element at all."""
    self.flags(virt_type="uml", cpu_mode=None, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsNone(guest.cpu)
def test_get_guest_cpu_config_default_lxc(self):
    """LXC guests get no CPU element at all."""
    self.flags(virt_type="lxc", cpu_mode=None, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsNone(guest.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
    """cpu_mode=host-passthrough is propagated into the CPU element."""
    self.flags(cpu_mode="host-passthrough", group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertEqual("host-passthrough", guest.cpu.mode)
    self.assertIsNone(guest.cpu.model)
    self.assertEqual(1, guest.cpu.sockets)
    self.assertEqual(1, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_cpu_config_host_model(self):
    """cpu_mode=host-model is propagated into the CPU element."""
    self.flags(cpu_mode="host-model", group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertEqual("host-model", guest.cpu.mode)
    self.assertIsNone(guest.cpu.model)
    self.assertEqual(1, guest.cpu.sockets)
    self.assertEqual(1, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_cpu_config_custom(self):
    """cpu_mode=custom with cpu_model picks the named CPU model."""
    self.flags(cpu_mode="custom", cpu_model="Penryn", group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertEqual("custom", guest.cpu.mode)
    self.assertEqual("Penryn", guest.cpu.model)
    self.assertEqual(1, guest.cpu.sockets)
    self.assertEqual(1, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_cpu_topology(self):
    """hw:cpu_max_sockets caps sockets; remaining vCPUs become cores."""
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.vcpus = 8
    instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    guest = drvr._get_guest_config(instance_ref,
                                   _fake_network_info(self, 1),
                                   image_meta, disk_info)
    self.assertIsInstance(guest.cpu, vconfig.LibvirtConfigGuestCPU)
    self.assertEqual("host-model", guest.cpu.mode)
    self.assertEqual(4, guest.cpu.sockets)
    self.assertEqual(2, guest.cpu.cores)
    self.assertEqual(1, guest.cpu.threads)
def test_get_guest_memory_balloon_config_by_default(self):
    """The default guest config gets a virtio memballoon device with
    the default 10s stats period.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    found = False
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            found = True
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            self.assertEqual(10, device.period)
    # Bug fix: previously the per-device assertions were silently
    # skipped when no memballoon device was emitted, so the test
    # passed vacuously; make the device's absence a failure.
    self.assertTrue(found)
def test_get_guest_memory_balloon_config_disable(self):
    """mem_stats_period_seconds=0 suppresses the memballoon device."""
    self.flags(mem_stats_period_seconds=0, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertFalse(any(dev.root_name == 'memballoon'
                         for dev in cfg.devices))
def test_get_guest_memory_balloon_config_period_value(self):
    """mem_stats_period_seconds is honored as the memballoon period."""
    self.flags(mem_stats_period_seconds=21, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    found = False
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            found = True
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            self.assertEqual(21, device.period)
    # Bug fix: fail if no memballoon device exists instead of silently
    # skipping every assertion above (vacuous pass).
    self.assertTrue(found)
def test_get_guest_memory_balloon_config_qemu(self):
    """qemu guests get a virtio memballoon with the default period."""
    self.flags(virt_type='qemu', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    found = False
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            found = True
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            self.assertEqual(10, device.period)
    # Bug fix: fail if no memballoon device exists instead of silently
    # skipping every assertion above (vacuous pass).
    self.assertTrue(found)
def test_get_guest_memory_balloon_config_xen(self):
    """xen guests get a xen-model memballoon with the default period."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    found = False
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            found = True
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('xen', device.model)
            self.assertEqual(10, device.period)
    # Bug fix: fail if no memballoon device exists instead of silently
    # skipping every assertion above (vacuous pass).
    self.assertTrue(found)
def test_get_guest_memory_balloon_config_lxc(self):
    """LXC guests never get a memballoon device."""
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    self.assertFalse(any(dev.root_name == 'memballoon'
                         for dev in cfg.devices))
def test_xml_and_uri_no_ramdisk_no_kernel(self):
    """No kernel/ramdisk ids: neither is expected in the guest XML."""
    data = dict(self.test_instance)
    self._check_xml_and_uri(data, expect_kernel=False,
                            expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
    """HVM vm_mode without kernel/ramdisk maps to a Xen HVM guest."""
    data = dict(self.test_instance)
    data.update({'vm_mode': vm_mode.HVM})
    self._check_xml_and_uri(data, expect_kernel=False,
                            expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
    """XEN vm_mode without kernel/ramdisk maps to a paravirt Xen guest."""
    data = dict(self.test_instance)
    data.update({'vm_mode': vm_mode.XEN})
    self._check_xml_and_uri(data, expect_kernel=False,
                            expect_ramdisk=False, expect_xen_hvm=False,
                            xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
    """Kernel only: kernel expected, ramdisk not."""
    data = dict(self.test_instance)
    data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(data, expect_kernel=True,
                            expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
    """A ramdisk without a kernel: the ramdisk is not expected either.

    NOTE(review): expect_ramdisk=False looks deliberate — a ramdisk
    presumably only takes effect together with an external kernel;
    confirm against the driver's boot logic.
    """
    instance_data = dict(self.test_instance)
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    self._check_xml_and_uri(instance_data,
                            expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
    """Kernel and ramdisk both set: both expected in the guest XML."""
    data = dict(self.test_instance)
    data['ramdisk_id'] = 'ari-deadbeef'
    data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(data, expect_kernel=True,
                            expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
    """Rescue boot with kernel and ramdisk: both expected."""
    data = dict(self.test_instance)
    data['ramdisk_id'] = 'ari-deadbeef'
    data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(data, expect_kernel=True,
                            expect_ramdisk=True, rescue=data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
    """Rescue boot without kernel/ramdisk: neither is expected."""
    data = dict(self.test_instance)
    self._check_xml_and_uri(data, expect_kernel=False,
                            expect_ramdisk=False, rescue=data)
def test_xml_and_uri_rescue_no_kernel(self):
    """Rescue with only a ramdisk: ramdisk expected, kernel not."""
    instance_data = dict(self.test_instance)
    # Consistency fix: use an 'ari-' (ramdisk image) id here; the
    # previous 'aki-deadbeef' was a copy/paste slip from the kernel-id
    # tests and disagreed with every sibling test's ramdisk_id.
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    self._check_xml_and_uri(instance_data, expect_kernel=False,
                            expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
    """Rescue with only a kernel: kernel expected, ramdisk not."""
    data = dict(self.test_instance)
    data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(data, expect_kernel=True,
                            expect_ramdisk=False, rescue=data)
def test_xml_uuid(self):
    """The guest XML carries the instance UUID."""
    self._check_xml_and_uuid(self.test_image_meta)
def test_lxc_container_and_uri(self):
    """LXC guests produce container-style XML and connection URI."""
    self._check_xml_and_container(dict(self.test_instance))
def test_xml_disk_prefix(self):
    """With no override, the default disk device prefix is used."""
    self._check_xml_and_disk_prefix(dict(self.test_instance), None)
def test_xml_user_specified_disk_prefix(self):
    """A user-specified disk prefix ('sd') is honored in the XML."""
    self._check_xml_and_disk_prefix(dict(self.test_instance), 'sd')
def test_xml_disk_driver(self):
    """Disk driver attributes appear in the generated guest XML."""
    self._check_xml_and_disk_driver(dict(self.test_instance))
def test_xml_disk_bus_virtio(self):
    """A plain root disk defaults to the virtio bus as vda."""
    meta = objects.ImageMeta.from_dict(self.test_image_meta)
    self._check_xml_and_disk_bus(meta, None,
                                 (("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
    # PowerPC has no IDE support, so libvirt translates ide calls to
    # scsi there; choose the expected triple by host architecture.
    per_arch = {arch.PPC: ("cdrom", "scsi", "sda"),
                arch.PPC64: ("cdrom", "scsi", "sda")}
    host_arch = blockinfo.libvirt_utils.get_arch({})
    expec_val = per_arch.get(host_arch, ("cdrom", "ide", "hda"))
    image_meta = objects.ImageMeta.from_dict({"disk_format": "iso"})
    self._check_xml_and_disk_bus(image_meta, None, (expec_val,))
def test_xml_disk_bus_ide_and_virtio(self):
    # PowerPC has no IDE support, so libvirt translates ide calls to
    # scsi there; choose the expected cdrom triple by host architecture.
    per_arch = {arch.PPC: ("cdrom", "scsi", "sda"),
                arch.PPC64: ("cdrom", "scsi", "sda")}
    cdrom_val = per_arch.get(blockinfo.libvirt_utils.get_arch({}),
                             ("cdrom", "ide", "hda"))
    block_device_info = {
        'swap': {'device_name': '/dev/vdc',
                 'swap_size': 1},
        'ephemerals': [{'device_type': 'disk',
                        'disk_bus': 'virtio',
                        'device_name': '/dev/vdb',
                        'size': 1}],
    }
    image_meta = objects.ImageMeta.from_dict({"disk_format": "iso"})
    self._check_xml_and_disk_bus(image_meta,
                                 block_device_info,
                                 (cdrom_val,
                                  ("disk", "virtio", "vdb"),
                                  ("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
    """list_instances returns every guest's name, running or not."""
    doms = [FakeVirtDomain(id=3, name="instance00000001"),
            FakeVirtDomain(id=17, name="instance00000002"),
            FakeVirtDomain(name="instance00000003"),
            FakeVirtDomain(name="instance00000004")]
    mock_list.return_value = doms

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    names = drvr.list_instances()
    self.assertEqual(len(doms), len(names))
    for name, dom in zip(names, doms):
        self.assertEqual(dom.name(), name)
    mock_list.assert_called_with(only_guests=True, only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instance_uuids(self, mock_list):
    """list_instance_uuids returns every guest's UUID, running or not."""
    doms = [FakeVirtDomain(id=3, name="instance00000001"),
            FakeVirtDomain(id=17, name="instance00000002"),
            FakeVirtDomain(name="instance00000003"),
            FakeVirtDomain(name="instance00000004")]
    mock_list.return_value = doms

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    uuids = drvr.list_instance_uuids()
    self.assertEqual(len(doms), len(uuids))
    for uuid, dom in zip(uuids, doms):
        self.assertEqual(dom.UUIDString(), uuid)
    mock_list.assert_called_with(only_guests=True, only_running=False)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus(self, get_online_cpus):
    """Only pinned CPUs that are online count toward the vCPU total."""
    self.flags(vcpu_pin_set="4-5")
    get_online_cpus.return_value = {4, 5, 6}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(2, drvr._get_vcpu_total())
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_out_of_range(self, get_online_cpus):
    """Pinning to a CPU that is not online raises Invalid."""
    self.flags(vcpu_pin_set="4-6")
    get_online_cpus.return_value = {4, 5}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
    """A pin-set range cannot be validated when libvirt cannot report
    online CPUs, so _get_vcpu_total raises Invalid.
    """
    self.flags(vcpu_pin_set="4-6")
    get_online_cpus.side_effect = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
    """A single-CPU pin set needs no online-CPU validation, so an
    unsupported-call error from libvirt is tolerated.
    """
    self.flags(vcpu_pin_set="1")
    get_online_cpus.side_effect = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(1, drvr._get_vcpu_total())
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
    """_get_vcpu_total re-reads the host CPU count on every call, so a
    hot-plugged CPU shows up without restarting the driver.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for count in (2, 3):
        get_cpu_count.return_value = count
        self.assertEqual(count, drvr._get_vcpu_total())
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_quiesce(self, mock_has_min_version):
    """quiesce() freezes guest filesystems via the qemu guest agent."""
    self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
    with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        # hw_qemu_guest_agent is what makes quiesce supported at all
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {"hw_qemu_guest_agent": "yes"}})
        self.assertIsNone(drvr.quiesce(self.context, instance, image_meta))
        mock_fsfreeze.assert_called_once_with()
def test_quiesce_not_supported(self):
    """quiesce raises when guest-agent support is absent."""
    self.create_fake_libvirt_mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    inst = objects.Instance(**self.test_instance)
    self.assertRaises(exception.InstanceQuiesceNotSupported,
                      drvr.quiesce, self.context, inst, None)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_unquiesce(self, mock_has_min_version):
    """unquiesce() thaws guest filesystems via the qemu guest agent."""
    self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                  lookupByName=self.fake_lookup)
    with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        # hw_qemu_guest_agent is what makes (un)quiesce supported at all
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {"hw_qemu_guest_agent": "yes"}})
        self.assertIsNone(drvr.unquiesce(self.context, instance,
                                         image_meta))
        mock_fsthaw.assert_called_once_with()
def test_create_snapshot_metadata(self):
    """_create_snapshot_metadata builds the glance upload dict from the
    base image and the instance, propagating os_type/disk_format/
    container_format from the base image when present.
    """
    base = objects.ImageMeta.from_dict(
        {'disk_format': 'raw'})
    instance_data = {'kernel_id': 'kernel',
                     'project_id': 'prj_id',
                     'ramdisk_id': 'ram_id',
                     'os_type': None}
    instance = objects.Instance(**instance_data)
    img_fmt = 'raw'
    snp_name = 'snapshot_name'
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
    expected = {'is_public': False,
                'status': 'active',
                'name': snp_name,
                'properties': {
                    'kernel_id': instance['kernel_id'],
                    'image_location': 'snapshot',
                    'image_state': 'available',
                    'owner_id': instance['project_id'],
                    'ramdisk_id': instance['ramdisk_id'],
                    },
                'disk_format': img_fmt,
                'container_format': 'bare',
                }
    self.assertEqual(ret, expected)

    # simulate an instance with os_type field defined
    # disk format equals to ami
    # container format not equals to bare
    instance['os_type'] = 'linux'
    base = objects.ImageMeta.from_dict(
        {'disk_format': 'ami',
         'container_format': 'test_container'})
    # second pass reuses and mutates the expected dict from above
    expected['properties']['os_type'] = instance['os_type']
    expected['disk_format'] = base.disk_format
    expected['container_format'] = base.container_format
    ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
    self.assertEqual(ret, expected)
def test_get_volume_driver(self):
    """The 'fake' volume type resolves to LibvirtFakeVolumeDriver."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    self.assertIsInstance(drvr._get_volume_driver(connection_info),
                          volume_drivers.LibvirtFakeVolumeDriver)
def test_get_volume_driver_unknown(self):
    """An unrecognized volume type raises VolumeDriverNotFound."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'unknown',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    self.assertRaises(exception.VolumeDriverNotFound,
                      drvr._get_volume_driver, connection_info)
@mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver,
                   'connect_volume')
@mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config')
def test_get_volume_config(self, get_config, connect_volume):
    """_get_volume_config delegates to the volume driver's get_config."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    disk_info = {'bus': 'fake-bus', 'type': 'fake-type',
                 'dev': 'vdb'}
    mock_conf = mock.MagicMock()
    get_config.return_value = mock_conf

    result = drvr._get_volume_config(connection_info, disk_info)
    get_config.assert_called_once_with(connection_info, disk_info)
    self.assertEqual(mock_conf, result)
def test_attach_invalid_volume_type(self):
    """attach_volume with an unknown volume type raises
    VolumeDriverNotFound.
    """
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    instance = objects.Instance(**self.test_instance)
    # mox: replay must happen before the driver touches the fake conn
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.VolumeDriverNotFound,
                      drvr.attach_volume, None,
                      {"driver_volume_type": "badtype"},
                      instance,
                      "/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
    """Block-size hints in connection data are rejected on hypervisors
    that do not support blockio tuning.
    """
    self.flags(virt_type='fake_type', group='libvirt')
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    instance = objects.Instance(**self.test_instance)
    # mox: replay must happen before the driver touches the fake conn
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.InvalidHypervisorType,
                      drvr.attach_volume, None,
                      {"driver_volume_type": "fake",
                       "data": {"logical_block_size": "4096",
                                "physical_block_size": "4096"}
                      },
                      instance,
                      "/dev/sda")
def _test_check_discard(self, mock_log, driver_discard=None,
                        bus=None, should_log=False):
    """Run _check_discard_for_attach_volume and verify whether the
    discard/bus combination triggered a log message.
    """
    disk_conf = mock.Mock(driver_discard=driver_discard,
                          target_bus=bus)
    instance = mock.Mock()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._check_discard_for_attach_volume(disk_conf, instance)
    self.assertEqual(should_log, mock_log.called)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_no_unmap(self, mock_log):
    """No discard requested on a scsi bus: nothing is logged."""
    self._test_check_discard(mock_log, driver_discard=None,
                             bus='scsi', should_log=False)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_blk_controller(self, mock_log):
    """unmap on a virtio (blk) bus logs a warning-style debug note."""
    self._test_check_discard(mock_log, driver_discard='unmap',
                             bus='virtio', should_log=True)
@mock.patch('nova.virt.libvirt.driver.LOG.debug')
def test_check_discard_for_attach_volume_valid_controller(self, mock_log):
    """unmap on a scsi bus is a supported combination: no log."""
    self._test_check_discard(mock_log, driver_discard='unmap',
                             bus='scsi', should_log=False)
    @mock.patch('nova.virt.libvirt.driver.LOG.debug')
    def test_check_discard_for_attach_volume_blk_controller_no_unmap(self,
                                                                     mock_log):
        """No discard requested on a virtio bus: nothing is logged."""
        self._test_check_discard(mock_log, driver_discard=None,
                                 bus='virtio', should_log=False)
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        """attach_volume on a RUNNING or PAUSED guest must call
        attachDeviceFlags with both AFFECT_CONFIG and AFFECT_LIVE, and
        wire connect/config/cache/discard helpers in order.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with test.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode'),
            mock.patch.object(drvr, '_check_discard_for_attach_volume')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode, mock_check_discard):
            # Both live power states must get the live+config flags.
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]

                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])

                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(
                    instance,
                    CONF.libvirt.virt_type,
                    test.MatchType(objects.ImageMeta),
                    bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
                mock_check_discard.assert_called_with(mock_conf, instance)
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        """detach_volume on a RUNNING or PAUSED guest must call
        detachDeviceFlags with AFFECT_CONFIG|AFFECT_LIVE and then
        disconnect the volume once the disk disappears from the XML.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_with_disk = """<domain>
  <devices>
    <disk type='file'>
      <source file='/path/to/fake-volume'/>
      <target dev='vdc' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
        mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock()

        # Second time don't return anything about disk vdc so it looks removed
        return_list = [mock_xml_with_disk, mock_xml_without_disk]
        # Doubling the size of return list because we test with two guest power
        # states
        mock_dom.XMLDesc.side_effect = return_list + return_list

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')

                mock_get_domain.assert_called_with(instance)
                mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_disk_not_found(self, mock_get_domain):
        """detach_volume raises DiskNotFound when the target device is
        absent from the domain XML.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml_without_disk = """<domain>
  <devices>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock(return_value=mock_xml_without_disk)

        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}

        mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234,
                                      5678]
        mock_get_domain.return_value = mock_dom
        self.assertRaises(exception.DiskNotFound, drvr.detach_volume,
                          connection_info, instance, '/dev/vdc')

        mock_get_domain.assert_called_once_with(instance)
    def test_multi_nic(self):
        """A guest with two NICs renders two bridge-type <interface>
        elements in the generated XML.
        """
        network_info = _fake_network_info(self, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')
def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
exc=ValueError()):
open_behavior = os.open(os.path.join('.', '.directio.test'),
os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
if raise_open:
open_behavior.AndRaise(exc)
else:
open_behavior.AndReturn(3)
write_bahavior = os.write(3, mox.IgnoreArg())
if raise_write:
write_bahavior.AndRaise(exc)
else:
os.close(3)
os.unlink(3)
    def test_supports_direct_io(self):
        """_supports_direct_io: True on success, re-raises unexpected
        errors from open/write, and False when either fails with EINVAL.
        """
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')

        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

        # Record five probe rounds; replay order must match the asserts below.
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)

        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()
def _check_xml_and_container(self, instance):
instance_ref = objects.Instance(**instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
check = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(check):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
    def _check_xml_and_disk_prefix(self, instance, prefix):
        """For each virt_type, verify the first disk target uses the
        configured disk_prefix (or the hypervisor's default when prefix
        is falsy).
        """
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        def _get_prefix(p, default):
            # Configured prefix wins; otherwise the per-hypervisor default.
            if p:
                return p + 'a'
            return default

        type_disk_map = {
            'qemu': [
                (lambda t: t.find('.').get('type'), 'qemu'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'xen': [
                (lambda t: t.find('.').get('type'), 'xen'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'xvda'))],
            'kvm': [
                (lambda t: t.find('.').get('type'), 'kvm'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'vda'))],
            'uml': [
                (lambda t: t.find('.').get('type'), 'uml'),
                (lambda t: t.find('./devices/disk/target').get('dev'),
                 _get_prefix(prefix, 'ubda'))]
            }

        for (virt_type, checks) in six.iteritems(type_disk_map):
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            network_info = _fake_network_info(self, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta)

            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta)
            tree = etree.fromstring(xml)

            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))
    def _check_xml_and_disk_driver(self, image_meta):
        """Disk cache mode in guest XML: 'none' while O_DIRECT is
        available, 'writethrough' once direct I/O support is withdrawn.
        """
        os_open = os.open
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            # Simulate an FS without O_DIRECT by raising EINVAL, otherwise
            # strip the flag and delegate to the real os.open.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)

        self.stub_out('os.open', os_open_stub)

        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported

        self.stubs.Set(libvirt_driver.LibvirtDriver,
            '_supports_direct_io', connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")
    def _check_xml_and_disk_bus(self, image_meta,
                                block_device_info, wantConfig):
        """Verify each <disk> device/bus/dev triple in the generated XML
        matches ``wantConfig`` (a list of (device, bus, dev) tuples),
        in order.
        """
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)

        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)
    def _check_xml_and_uuid(self, image_meta):
        """The <uuid> element of the generated XML must equal the
        instance's uuid.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        """Render guest XML for each relevant virt_type and verify the
        connection URI plus a per-hypervisor list of XML checks (os type,
        kernel/ramdisk, sysinfo entries, serial/console devices, disks,
        firewall filterref), then verify an explicitly configured
        connection_uri is never overridden.
        """
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        # Maps virt_type -> (expected URI, list of (extractor, expected)).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.UML)]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text,
                               xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                # sysinfo entries: manufacturer, product, version, serial,
                # uuid -- in that order.
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                    'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)

        # Checks applied to every hypervisor type.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                    get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                    get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)

                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].replace(':', '')
                fw = firewall.NWFilterFirewall(drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)
    def test_ensure_filtering_rules_for_instance_timeout(self):
        """ensure_filtering_rules_for_instance gives up after the expected
        total sleep time (29s) when the filter never appears, raising a
        NovaException mentioning the missing filter.
        """
        # ensure_filtering_fules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            # Accumulates the total time slept so the timeout can be checked.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)

        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination pre-check succeeds for block migration; legacy dict
        carries filename, disk_available_mb and block_migration=True.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                instance_ref, None, compute_info, True)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination pre-check succeeds without block migration; legacy
        dict carries block_migration=False.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                instance_ref, None, compute_info, False)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 409600,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        result.is_volume_backed = False
        # None as the source CPU proves the guest model was not used.
        mock_cpu.assert_called_once_with(None, 'asdf')
        expected_result = {"filename": 'fake',
                           "image_type": CONF.libvirt.images_type,
                           "block_migration": False,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024,
                           "is_volume_backed": False}
        self.assertDictEqual(expected_result, result.to_legacy_dict())
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file):
        """Without instance CPU info, the source compute node's cpu_info
        JSON is used for the comparison and the pre-check still passes.
        """
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": arch.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        }), 'disk_available_least': 1}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(self.context,
                instance_ref, compute_info, compute_info, False)
        # NOTE(danms): Compute manager would have set this, so set it here
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 1024,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_incompatible_cpu_raises(
            self, mock_cpu):
        """An InvalidCPUInfo from the CPU comparison propagates out of the
        destination pre-check.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
        self.assertRaises(exception.InvalidCPUInfo,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
        """A positive compare_cpu result means compatible: returns None."""
        mock_compare.return_value = 5
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
        self.assertIsNone(ret)
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                                mock_vconfig,
                                                                mock_compare):
        """A VIR_ERR_NO_SUPPORT libvirt error from compareCPU is treated
        as success (None), not an error.
        """
        not_supported_exc = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'this function is not supported by the connection driver:'
                ' virCompareCPU',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
        self.assertIsNone(ret)
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                       '_vcpu_model_to_cpu_config')
    def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                              mock_compare):
        """A guest CPU model is compared via _vcpu_model_to_cpu_config;
        a positive result returns None.
        """
        mock_compare.return_value = 6
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None)
        self.assertIsNone(ret)
def test_compare_cpu_virt_type_xen(self):
self.flags(virt_type='xen', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None)
self.assertIsNone(ret)
def test_compare_cpu_virt_type_qemu(self):
self.flags(virt_type='qemu', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None)
self.assertIsNone(ret)
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                                mock_compare):
        """A zero compare_cpu result (incompatible) raises InvalidCPUInfo."""
        mock_compare.return_value = 0
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidCPUInfo,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info))
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                                 mock_compare):
        """An unexpected libvirtError from compareCPU is wrapped in
        MigrationPreCheckError.
        """
        mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationPreCheckError,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info))
    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        """Destination cleanup removes the shared-storage test file named
        in the migrate data.
        """
        objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename="file",
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=1024)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
        drvr._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        drvr.check_can_live_migrate_destination_cleanup(self.context,
                                                        dest_check_data)
    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      is_booted_from_volume=False,
                                      disk_available_mb=1024,
                                      block_device_info=None,
                                      block_device_text=None):
        """Record the mox expectations check_can_live_migrate_source hits,
        parameterized by storage topology, and return the test fixtures.

        :returns: (instance, dest_check_data, drvr) tuple; the caller must
            ReplayAll before invoking check_can_live_migrate_source.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=block_migration,
            disk_over_commit=False,
            disk_available_mb=disk_available_mb)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file').AndReturn(
                is_shared_instance_path)
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=block_device_info).\
            AndReturn(block_device_text)
        self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
        drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
            is_booted_from_volume)

        return (instance, dest_check_data, drvr)
    def test_check_can_live_migrate_source_block_migration(self):
        """Block migration with local storage passes and the result carries
        shared-storage flags.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True)

        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data.disk_available_mb,
            False, None)

        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)
    def test_check_can_live_migrate_source_shared_block_storage(self):
        """Non-block migration on shared block storage passes."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)
    def test_check_can_live_migrate_source_shared_instance_path(self):
        """Non-block migration on a shared instance path passes."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)
    def test_check_can_live_migrate_source_non_shared_fails(self):
        """Non-block migration without any shared storage raises
        InvalidSharedStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source, self.context,
                          instance, dest_check_data)
    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        """Block migration on shared block storage is contradictory and
        raises InvalidLocalStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_block_storage=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        """Block migration on a shared instance path is contradictory and
        raises InvalidLocalStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_instance_path=True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data, None)
    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        """Neither shared storage nor block migration raises
        InvalidSharedStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        """Block migration fails with MigrationError when the destination
        lacks the disk space required by the instance.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                disk_available_mb=0)

        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_booted_from_volume=True,
block_device_text='[]')
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_booted_from_volume_with_swap(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_booted_from_volume=True,
block_device_text='[{"path":"disk.swap"}]')
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage', return_value=False)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file', return_value=False)
    def test_check_can_live_migrate_source_block_migration_with_bdm_error(
            self, mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough,
            mock_min_version):
        """Block migration with attached volume BDMs must raise a
        pre-check error when libvirt is too old (has_min_version=False).
        """
        bdi = {'block_device_mapping': ['bdm']}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=100)
        self.assertRaises(exception.MigrationPreCheckError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data,
                          block_device_info=bdi)
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def _test_check_can_live_migrate_source_block_migration_none(
            self, block_migrate, is_shared_instance_path, is_share_block,
            mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local, mock_enough, mock_verson):
        """Helper: run the source check with block_migration left unset and
        verify the driver resolves it to ``block_migrate``.

        :param block_migrate: expected resolved block_migration value
        :param is_shared_instance_path: mocked shared-instance-path result
        :param is_share_block: mocked shared-block-storage result
        """
        # NOTE(review): ``mock_verson`` looks like a typo for
        # ``mock_version``; it is decorator-injected so the name is harmless.
        mock_check.return_value = is_shared_instance_path
        mock_shared_block.return_value = is_share_block
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Note: no block_migration field -> the driver must decide itself.
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_over_commit=False,
            disk_available_mb=100)
        dest_check_data_ret = drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        self.assertEqual(block_migrate, dest_check_data_ret.block_migration)
def test_check_can_live_migrate_source_block_migration_none_shared1(self):
self._test_check_can_live_migrate_source_block_migration_none(
False,
True,
False)
def test_check_can_live_migrate_source_block_migration_none_shared2(self):
self._test_check_can_live_migrate_source_block_migration_none(
False,
False,
True)
def test_check_can_live_migrate_source_block_migration_none_no_share(self):
self._test_check_can_live_migrate_source_block_migration_none(
True,
False,
False)
    # NOTE(review): '_assert_dest_node_has_enough_disk' is patched twice
    # below (presumably a copy-paste slip); both patches target the same
    # method, so the test still behaves as intended -- confirm and dedupe.
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_assert_dest_node_has_enough_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_has_local_disk')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_booted_from_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                'get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_is_shared_block_storage')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
                '_check_shared_storage_test_file')
    def test_check_can_live_migration_source_disk_over_commit_none(self,
            mock_check, mock_shared_block, mock_get_bdi,
            mock_booted_from_volume, mock_has_local,
            mock_enough, mock_disk_check):
        """Without disk_over_commit in the migrate data, the destination
        disk-space assertion must be skipped entirely.
        """
        mock_check.return_value = False
        mock_shared_block.return_value = False
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Note: neither disk_over_commit nor block_migration is set.
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            disk_available_mb=100)
        drvr.check_can_live_migrate_source(
            self.context, instance, dest_check_data)
        self.assertFalse(mock_disk_check.called)
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>{}</devices></domain>")
disks_xml = ''
for dsk in disks:
if dsk['type'] is not 'network':
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source {source}='{source_path}'/>"
"<target dev='{target_dev}' bus='virtio'/>"
"</disk>".format(**dsk)])
else:
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source protocol='{source_proto}'"
"name='{source_image}' >"
"<host name='hostname' port='7000'/>"
"<config file='/path/to/file'/>"
"</source>"
"<target dev='{target_dev}'"
"bus='ide'/>".format(**dsk)])
# Preparing mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.XMLDesc = mock.Mock()
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
mock_lookup = mock.Mock()
def mock_lookup_side_effect(name):
return mock_virDomain
mock_lookup.side_effect = mock_lookup_side_effect
mock_getsize = mock.Mock()
mock_getsize.return_value = "10737418240"
return (mock_getsize, mock_lookup)
    def test_is_shared_block_storage_rbd(self):
        """RBD on both ends counts as shared block storage without ever
        inspecting the instance's disks.
        """
        self.flags(images_type='rbd', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_instance_disk_info = mock.Mock()
        data = objects.LibvirtLiveMigrateData(image_type='rbd')
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            # NOTE(review): drvr is rebuilt here, so the patch above binds
            # to the discarded driver; the call-count assertion below is
            # satisfied either way since rbd short-circuits -- confirm.
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertTrue(drvr._is_shared_block_storage(instance, data,
                                  block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_lvm(self):
        """LVM-backed local images are not shared block storage."""
        self.flags(images_type='lvm', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='lvm',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            # NOTE(review): drvr is rebuilt inside the patch context, so the
            # patch binds to the discarded driver instance -- confirm.
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, data,
                                    block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_qcow2(self):
        """qcow2 local images are not shared block storage."""
        self.flags(images_type='qcow2', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='qcow2',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            # NOTE(review): drvr is rebuilt inside the patch context, so the
            # patch binds to the discarded driver instance -- confirm.
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, data,
                                    block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_rbd_only_source(self):
        """RBD only on the source (no image_type in the migrate data) does
        not count as shared block storage.
        """
        self.flags(images_type='rbd', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False,
                                              is_volume_backed=False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            # NOTE(review): drvr is rebuilt inside the patch context, so the
            # patch binds to the discarded driver instance -- confirm.
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                  instance, data,
                                  block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_rbd_only_dest(self):
        """RBD only on the destination (source uses the default backend)
        does not count as shared block storage.
        """
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        data = objects.LibvirtLiveMigrateData(image_type='rbd',
                                              is_volume_backed=False,
                                              is_shared_instance_path=False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            # NOTE(review): drvr is rebuilt inside the patch context, so the
            # patch binds to the discarded driver instance -- confirm.
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                  instance, data,
                                  block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_volume_backed(self):
disks = [{'type': 'block',
'driver': 'raw',
'source': 'dev',
'source_path': '/dev/disk',
'target_dev': 'vda'}]
bdi = {'block_device_mapping': [
{'connection_info': 'info', 'mount_device': '/dev/vda'}]}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
(mock_getsize, mock_lookup) =\
self._is_shared_block_storage_test_create_mocks(disks)
data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
is_shared_instance_path=False)
with mock.patch.object(host.Host, 'get_domain', mock_lookup):
self.assertTrue(drvr._is_shared_block_storage(instance, data,
block_device_info = bdi))
mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_volume_backed_with_disk(self):
disks = [{'type': 'block',
'driver': 'raw',
'source': 'dev',
'source_path': '/dev/disk',
'target_dev': 'vda'},
{'type': 'file',
'driver': 'raw',
'source': 'file',
'source_path': '/instance/disk.local',
'target_dev': 'vdb'}]
bdi = {'block_device_mapping': [
{'connection_info': 'info', 'mount_device': '/dev/vda'}]}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
(mock_getsize, mock_lookup) =\
self._is_shared_block_storage_test_create_mocks(disks)
data = objects.LibvirtLiveMigrateData(is_volume_backed=True,
is_shared_instance_path=False)
with test.nested(
mock.patch.object(os.path, 'getsize', mock_getsize),
mock.patch.object(host.Host, 'get_domain', mock_lookup)):
self.assertFalse(drvr._is_shared_block_storage(
instance, data,
block_device_info = bdi))
mock_getsize.assert_called_once_with('/instance/disk.local')
mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_nfs(self):
bdi = {'block_device_mapping': []}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_backend = mock.MagicMock()
mock_image_backend.backend.return_value = mock_backend
mock_backend.is_file_in_instance_path.return_value = True
mock_get_instance_disk_info = mock.Mock()
data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=True,
image_type='foo')
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
self.assertTrue(drvr._is_shared_block_storage(
'instance', data, block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_live_migration_update_graphics_xml(self):
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = dict(self.test_instance)
instance_dict.update({'host': 'fake',
'power_state': power_state.RUNNING,
'vm_state': vm_states.ACTIVE})
instance_ref = objects.Instance(**instance_dict)
xml_tmpl = ("<domain type='kvm'>"
"<devices>"
"<graphics type='vnc' listen='{vnc}'>"
"<listen address='{vnc}'/>"
"</graphics>"
"<graphics type='spice' listen='{spice}'>"
"<listen address='{spice}'/>"
"</graphics>"
"</devices>"
"</domain>")
initial_xml = xml_tmpl.format(vnc='1.2.3.4',
spice='5.6.7.8')
target_xml = xml_tmpl.format(vnc='10.0.0.1',
spice='10.0.0.2')
target_xml = etree.tostring(etree.fromstring(target_xml))
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI2")
_bandwidth = CONF.libvirt.live_migration_bandwidth
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
initial_xml)
vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
None,
target_xml,
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(
fakelibvirt.libvirtError("ERR"))
# start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='127.0.0.1',
target_connect_addr=None,
bdms=[],
block_migration=False)
self.mox.ReplayAll()
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, vdmock, [])
    def test_live_migration_update_volume_xml(self):
        """_live_migration_operation passes the guest XML and the BDM list
        to _update_xml so volume device paths get rewritten.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        # start test
        connection_info = {
            u'driver_volume_type': u'iscsi',
            u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw', u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }
        bdm = objects.LibvirtLiveMigrateBDMInfo(
            serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
            bus='virtio', type='disk', dev='vdb',
            connection_info=connection_info)
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr=None,
            bdms=[bdm],
            block_migration=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()

        with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
                mget_info,\
                mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
                mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
                mock.patch.object(drvr, '_update_xml') as mupdate:

            mget_info.side_effect = exception.InstanceNotFound(
                instance_id='foo')
            mget_domain.return_value = test_mock
            test_mock.XMLDesc.return_value = target_xml
            self.assertFalse(drvr._live_migration_operation(
                             self.context, instance_ref, 'dest',
                             False, migrate_data, test_mock, []))
            mupdate.assert_called_once_with(target_xml, migrate_data.bdms,
                                            {}, '')
    def test_live_migration_with_valid_target_connect_addr(self):
        """When target_connect_addr is set, the migration URI must point at
        that address instead of the nominal destination host.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        # start test
        connection_info = {
            u'driver_volume_type': u'iscsi',
            u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw', u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }
        bdm = objects.LibvirtLiveMigrateBDMInfo(
            serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
            bus='virtio', type='disk', dev='vdb',
            connection_info=connection_info)
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr='127.0.0.2',
            bdms=[bdm],
            block_migration=False)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()

        with mock.patch.object(drvr, '_update_xml') as mupdate:
            test_mock.XMLDesc.return_value = target_xml
            drvr._live_migration_operation(self.context, instance_ref,
                                           'dest', False, migrate_data,
                                           test_mock, [])
            # mupdate() yields the mock's return value, i.e. the XML the
            # driver passed on to migrateToURI2.
            test_mock.migrateToURI2.assert_called_once_with(
                'qemu+tcp://127.0.0.2/system',
                None, mupdate(), None, None, 0)
    def test_update_volume_xml(self):
        """_update_volume_xml rewrites a volume disk's source path from the
        BDM's connection_info (old lun-X path -> new lun-Z path).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                                 bus='virtio',
                                                 type='disk',
                                                 dev='vdb')
        bdmi.connection_info = {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {u'access_mode': u'rw', u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
        # The config object _get_volume_config would build for the new
        # destination-side volume attachment.
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = bdmi.type
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = bdmi.dev
        conf.target_bus = bdmi.bus
        conf.serial = bdmi.connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = bdmi.connection_info['data'].get('device_path')

        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            parser = etree.XMLParser(remove_blank_text=True)
            xml_doc = etree.fromstring(initial_xml, parser)
            config = drvr._update_volume_xml(xml_doc, [bdmi])
            xml_doc = etree.fromstring(target_xml, parser)
            self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
def test_live_migration_uri(self):
hypervisor_uri_map = (
('xen', 'xenmigr://%s/system'),
('kvm', 'qemu+tcp://%s/system'),
('qemu', 'qemu+tcp://%s/system'),
# anything else will return None
('lxc', None),
('parallels', None),
('', None),
)
dest = 'destination'
for hyperv, uri in hypervisor_uri_map:
self.flags(virt_type=hyperv, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
if uri is not None:
uri = uri % dest
self.assertEqual(uri, drvr._live_migration_uri(dest))
else:
self.assertRaises(exception.LiveMigrationURINotAvailable,
drvr._live_migration_uri,
dest)
def test_live_migration_uri_forced(self):
dest = 'destination'
for hyperv in ('kvm', 'xen'):
self.flags(virt_type=hyperv, group='libvirt')
forced_uri = 'foo://%s/bar'
self.flags(live_migration_uri=forced_uri, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest))
    def test_update_volume_xml_no_serial(self):
        """Disks whose XML carries no serial are left untouched by
        _update_volume_xml (initial and target XML are identical).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial></serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
        initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                      'ip-1.2.3.4:3260-iqn.'
                                      'abc.12345.opst-lun-X')
        target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                     'ip-1.2.3.4:3260-iqn.'
                                     'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        connection_info = {
            u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {
                u'access_mode': u'rw', u'target_discovered': False,
                u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
                u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
                u'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            },
        }
        bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
                                                 bus='virtio',
                                                 dev='vdb',
                                                 type='disk')
        bdmi.connection_info = connection_info
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = bdmi.type
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = bdmi.dev
        conf.target_bus = bdmi.bus
        conf.serial = bdmi.connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = bdmi.connection_info['data'].get('device_path')

        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            xml_doc = etree.fromstring(initial_xml)
            config = drvr._update_volume_xml(xml_doc, [bdmi])
            self.assertEqual(target_xml, etree.tostring(config))
def test_update_volume_xml_no_connection_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
initial_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = self.device_xml_tmpl.format(
device_path='/dev/disk/by-path/'
'ip-1.2.3.4:3260-iqn.'
'abc.12345.opst-lun-X')
target_xml = etree.tostring(etree.fromstring(target_xml))
serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial,
dev='vdb',
type='disk',
bus='scsi',
format='qcow')
bdmi.connection_info = {}
conf = vconfig.LibvirtConfigGuestDisk()
with mock.patch.object(drvr, '_get_volume_config',
return_value=conf):
xml_doc = etree.fromstring(initial_xml)
config = drvr._update_volume_xml(xml_doc, [bdmi])
self.assertEqual(target_xml, etree.tostring(config))
    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2")
    @mock.patch.object(fakelibvirt.virDomain, "XMLDesc")
    def test_live_migration_update_serial_console_xml(self, mock_xml,
                                                      mock_migrate):
        """The serial console listen address must be rewritten in the XML
        handed to migrateToURI2.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_ref = self.test_instance

        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<console type='tcp'>"
                    "<source mode='bind' host='{addr}' service='10000'/>"
                    "</console>"
                    "</devices>"
                    "</domain>")
        initial_xml = xml_tmpl.format(addr='9.0.0.1')

        target_xml = xml_tmpl.format(addr='9.0.0.12')
        target_xml = etree.tostring(etree.fromstring(target_xml))

        # Preparing mocks
        mock_xml.return_value = initial_xml
        mock_migrate.side_effect = fakelibvirt.libvirtError("ERR")

        # start test
        bandwidth = CONF.libvirt.live_migration_bandwidth
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='10.0.0.1',
            graphics_listen_addr_spice='10.0.0.2',
            serial_listen_addr='9.0.0.12',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        dom = fakelibvirt.virDomain
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, dom, [])
        mock_xml.assert_called_once_with(
            flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
        mock_migrate.assert_called_once_with(
            drvr._live_migration_uri('dest'),
            None, target_xml, mock.ANY, None, bandwidth)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
def test_live_migration_fails_with_serial_console_without_migratable(self):
self.compute = importutils.import_object(CONF.compute_manager)
instance_ref = self.test_instance
CONF.set_override("enabled", True, "serial_console")
dom = fakelibvirt.virDomain
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='', target_connect_addr=None,
block_migration=False)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
False, migrate_data, dom, [])
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
        """Without VIR_DOMAIN_XML_MIGRATABLE the driver falls back to the
        legacy migrateToURI API (wildcard listen addresses allow this).
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='0.0.0.0',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock, [])
    def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
        """When the migrate data carries no graphics listen addresses the
        driver uses the legacy migrateToURI API.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            serial_listen_addr='',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock, [])
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml',
                return_value='')
    @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
    def test_live_migration_uses_migrateToURI3(
            self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
            mock_min_version):
        """With a new-enough libvirt the driver calls migrateToURI3 with a
        params dict listing the disks to migrate.
        """
        # Preparing mocks
        disk_paths = ['vda', 'vdb']
        params = {
            'migrate_disks': ['vda', 'vdb'],
            'bandwidth': CONF.libvirt.live_migration_bandwidth,
            'destination_xml': '',
        }
        mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")

        # Start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='0.0.0.0',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        dom = fakelibvirt.virDomain
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance, 'dest',
                          False, migrate_data, dom, disk_paths)
        mock_migrateToURI3.assert_called_once_with(
            drvr._live_migration_uri('dest'), params, None)
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
        """Non-wildcard graphics listen addresses cannot be migrated
        without VIR_DOMAIN_XML_MIGRATABLE: MigrationError is raised and
        migrateToURI is never called (no mox expectation recorded).
        """
        self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc')
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='1.2.3.4',
            graphics_listen_addr_spice='1.2.3.4',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            block_migration=False)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock, [])
    def test_live_migration_raises_exception(self):
        """A libvirt error during migration propagates and leaves the
        instance state untouched (ACTIVE/RUNNING).
        """
        # Confirms recover method is called when exceptions are raised.
        # Preparing data
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        # Record the expectation matching whichever API the driver will
        # pick, depending on VIR_DOMAIN_XML_MIGRATABLE availability.
        if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
            vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                                mox.IgnoreArg(),
                                None,
                                _bandwidth).AndRaise(
                                        fakelibvirt.libvirtError('ERR'))
        else:
            vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
                           ).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
            vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
                                 None,
                                 mox.IgnoreArg(),
                                 mox.IgnoreArg(),
                                 None,
                                 _bandwidth).AndRaise(
                                         fakelibvirt.libvirtError('ERR'))

        # start test
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='127.0.0.1',
            graphics_listen_addr_spice='127.0.0.1',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock, [])

        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
        self.assertEqual(power_state.RUNNING, instance_ref.power_state)
    def test_live_migration_raises_unsupported_config_exception(self):
        """VIR_ERR_CONFIG_UNSUPPORTED from migrateToURI2 triggers a retry
        via the legacy migrateToURI API.
        """
        # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
        # migrateToURI is used instead.

        # Preparing data
        instance_ref = objects.Instance(**self.test_instance)

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
        self.mox.StubOutWithMock(vdmock, 'migrateToURI')
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
            FakeVirtDomain().XMLDesc(flags=0))
        unsupported_config_error = fakelibvirt.libvirtError('ERR')
        unsupported_config_error.err = (
            fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
        # This is the first error we hit but since the error code is
        # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
        vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None,
                             mox.IgnoreArg(), mox.IgnoreArg(), None,
                             _bandwidth).AndRaise(unsupported_config_error)
        # This is the second and final error that will actually kill the run,
        # we use TestingException to make sure it's not the same libvirtError
        # above.
        vdmock.migrateToURI(drvr._live_migration_uri('dest'),
                            mox.IgnoreArg(), None,
                            _bandwidth).AndRaise(test.TestingException('oops'))

        graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
        migrate_data = objects.LibvirtLiveMigrateData(
            graphics_listen_addr_vnc='0.0.0.0',
            graphics_listen_addr_spice='127.0.0.1',
            serial_listen_addr='127.0.0.1',
            target_connect_addr=None,
            bdms=[],
            block_migration=False)

        # NOTE(review): ``drvr`` is used in the expectations above before
        # this assignment; presumably a ``drvr`` from an earlier StubOut
        # survives at record time -- verify the setup ordering.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(
            drvr, '_check_graphics_addresses_can_live_migrate')
        drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
        self.mox.ReplayAll()

        # start test
        self.assertRaises(test.TestingException,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock, [])
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil
                                                    ):
    """Rollback with a non-shared instance path removes the destination
    instance directory even when destroy() raises.
    """
    # destroy method may raise InstanceTerminationFailure or
    # InstancePowerOffFailure, here use their base class Invalid.
    mock_destroy.side_effect = exception.Invalid(reason='just test')
    # NOTE: use a *relative* second component; the original passed
    # '/fake_instance_uuid', whose leading slash makes os.path.join
    # discard instances_path entirely, so the fake path never actually
    # lived under instances_path as intended.
    fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                      'fake_instance_uuid')
    mock_get_instance_path.return_value = fake_instance_path
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    migrate_data = objects.LibvirtLiveMigrateData(
        is_shared_instance_path=False,
        instance_relative_path=False)
    # The destroy() error propagates to the caller...
    self.assertRaises(exception.Invalid,
                      drvr.rollback_live_migration_at_destination,
                      "context", "instance", [], None, True, migrate_data)
    # ...but the instance directory is still cleaned up.
    mock_exist.assert_called_once_with(fake_instance_path)
    mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                mock_get_instance_path,
                                                mock_exist,
                                                mock_shutil
                                                ):
    """With a shared instance path, rollback only destroys the guest and
    never touches the instance directory on disk.
    """
    migrate_data = objects.LibvirtLiveMigrateData(
        is_shared_instance_path=True,
        instance_relative_path=False)
    driver_obj = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    driver_obj.rollback_live_migration_at_destination(
        "context", "instance", [], None, True, migrate_data)
    # The guest is torn down...
    mock_destroy.assert_called_once_with("context", "instance", [],
                                         None, True, migrate_data)
    # ...but the shared directory is left alone.
    for untouched in (mock_get_instance_path, mock_exist, mock_shutil):
        self.assertFalse(untouched.called)
@mock.patch.object(host.Host, "has_min_version", return_value=False)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths(self, mock_xml, mock_version):
    """Only locally-writable disks are selected for block migration.

    From the domain XML below the expected result keeps the plain file
    disk (vda) and the block device (vdd), and drops the <shareable/>
    disk, the <readonly/> disk and the network-backed disk.
    """
    xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
    mock_xml.return_value = xml
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)
    # context/instance are unused in the non-selective (old libvirt) path
    paths = drvr._live_migration_copy_disk_paths(None, None, guest)
    self.assertEqual((["/var/lib/nova/instance/123/disk.root",
                       "/dev/mapper/somevol"], ['vda', 'vdd']), paths)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
@mock.patch('nova.virt.driver.get_block_device_info')
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
def test_live_migration_copy_disk_paths_selective_block_migration(
        self, mock_xml, mock_get_instance,
        mock_block_device_info, mock_version):
    """With selective block migration (new enough libvirt), only disks
    NOT backed by a mapped volume are copied: vda/vdb/vdc here, while
    vdd (an attached iSCSI volume) and the network disk are excluded.
    """
    xml = """
<domain>
<name>dummy</name>
<uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
<target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
<target dev="vdb"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
<target dev="vdc"/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
<target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
<host name="hostname" port="443"/>
</source>
</disk>
</devices>
</domain>"""
    mock_xml.return_value = xml
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    # Block device info: swap, one ephemeral, and one attached iSCSI
    # volume mapped at /dev/vdd (which therefore must not be copied).
    block_device_info = {
        'swap': {
            'disk_bus': u'virtio',
            'swap_size': 10,
            'device_name': u'/dev/vdc'
        },
        'root_device_name': u'/dev/vda',
        'ephemerals': [{
            'guest_format': u'ext3',
            'device_name': u'/dev/vdb',
            'disk_bus': u'virtio',
            'device_type': u'disk',
            'size': 1
        }],
        'block_device_mapping': [{
            'guest_format': None,
            'boot_index': None,
            'mount_device': u'/dev/vdd',
            'connection_info': {
                u'driver_volume_type': u'iscsi',
                'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
                u'data': {
                    u'access_mode': u'rw',
                    u'target_discovered': False,
                    u'encrypted': False,
                    u'qos_specs': None,
                    u'target_iqn': u'iqn.2010-10.org.openstack:'
                                   u'volume-147df29f-aec2-4851-b3fe-'
                                   u'f68dad151834',
                    u'target_portal': u'10.102.44.141:3260', u'volume_id':
                        u'147df29f-aec2-4851-b3fe-f68dad151834',
                    u'target_lun': 1,
                    u'auth_password': u'cXELT66FngwzTwpf',
                    u'auth_username': u'QbQQjj445uWgeQkFKcVw',
                    u'auth_method': u'CHAP'
                }
            },
            'disk_bus': None,
            'device_type': None,
            'delete_on_termination': False
        }]
    }
    mock_block_device_info.return_value = block_device_info
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
    guest = libvirt_guest.Guest(dom)
    return_value = drvr._live_migration_copy_disk_paths(context, instance,
                                                        guest)
    expected = (['/var/lib/nova/instance/123/disk.root',
                 '/var/lib/nova/instance/123/disk.shared',
                 '/var/lib/nova/instance/123/disk.config'],
                ['vda', 'vdb', 'vdc'])
    self.assertEqual(expected, return_value)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_data_gb_plain(self, mock_paths):
    """Plain (non-block) migration counts only guest RAM (2 GB) and
    never inspects any disk paths.
    """
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(2, drvr._live_migration_data_gb(instance, []))
    mock_paths.assert_not_called()
def test_live_migration_data_gb_block(self):
    """Block migration adds each disk's size, rounded up to whole GB,
    on top of the 2 GB of guest RAM: 2 + 10 + ceil(1.5) == 14.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    class FakeStat(object):
        # Minimal stat_result stand-in: only st_size is consumed.
        def __init__(self, size):
            self.st_size = size

    disk_sizes = {
        "/var/lib/nova/instance/123/disk.root": 10 * units.Gi,
        "/dev/mapper/somevol": 1.5 * units.Gi,
    }

    def fake_stat(path):
        try:
            return FakeStat(disk_sizes[path])
        except KeyError:
            raise Exception("Should not be reached")

    disk_paths = ["/var/lib/nova/instance/123/disk.root",
                  "/dev/mapper/somevol"]
    with mock.patch.object(os, "stat") as mock_stat:
        mock_stat.side_effect = fake_stat
        # Expecting 2 GB for RAM, plus 10 GB for disk.root
        # and 1.5 GB rounded to 2 GB for somevol, so 14 GB
        self.assertEqual(
            14, drvr._live_migration_data_gb(instance, disk_paths))
# Outcomes that _test_live_migration_monitoring can be told to expect
# from a monitoring run.
EXPECT_SUCCESS = 1
EXPECT_FAILURE = 2
EXPECT_ABORT = 3
@mock.patch.object(time, "time")
@mock.patch.object(time, "sleep",
                   side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.DomainJobInfo, "for_domain")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(objects.Migration, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def _test_live_migration_monitoring(self,
                                    job_info_records,
                                    time_records,
                                    expect_result,
                                    mock_abort,
                                    mock_running,
                                    mock_save,
                                    mock_mig_save,
                                    mock_job_info,
                                    mock_sleep,
                                    mock_time,
                                    expected_mig_status=None):
    """Drive _live_migration_monitor through a scripted job history.

    :param job_info_records: ordered DomainJobInfo records to feed the
        monitor; the string sentinels "thread-finish" and "domain-stop"
        trigger the finish event and domain destruction respectively.
    :param time_records: fake time.time() values, consumed one per real
        (non-sentinel) job record; when exhausted a fixed far-future
        time is returned.
    :param expect_result: one of EXPECT_SUCCESS/EXPECT_FAILURE/
        EXPECT_ABORT, selecting which assertions are applied.
    :param expected_mig_status: if set, the migration_status kwarg the
        recover method must have been called with.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
    guest = libvirt_guest.Guest(dom)
    finish_event = eventlet.event.Event()

    def fake_job_info(hostself):
        # Pop scripted records; sentinels perform their side effect and
        # the loop continues until a real record can be returned.
        while True:
            self.assertTrue(len(job_info_records) > 0)
            rec = job_info_records.pop(0)
            if type(rec) == str:
                if rec == "thread-finish":
                    finish_event.send()
                elif rec == "domain-stop":
                    dom.destroy()
            else:
                # Advance the fake clock once per real record.
                if len(time_records) > 0:
                    time_records.pop(0)
                return rec
        return rec

    def fake_time(self):
        # Current scripted time, or a fixed far-future timestamp once
        # the scripted values run out.
        if len(time_records) > 0:
            return time_records[0]
        else:
            return int(
                datetime.datetime(2001, 1, 20, 20, 1, 0)
                .strftime('%s'))

    mock_job_info.side_effect = fake_job_info
    mock_time.side_effect = fake_time
    dest = mock.sentinel.migrate_dest
    migration = objects.Migration(context=self.context, id=1)
    migrate_data = objects.LibvirtLiveMigrateData(
        migration=migration)
    fake_post_method = mock.MagicMock()
    fake_recover_method = mock.MagicMock()
    drvr._live_migration_monitor(self.context, instance,
                                 guest, dest,
                                 fake_post_method,
                                 fake_recover_method,
                                 False,
                                 migrate_data,
                                 dom,
                                 finish_event,
                                 [])
    mock_mig_save.assert_called_with()
    if expect_result == self.EXPECT_SUCCESS:
        self.assertFalse(fake_recover_method.called,
                         'Recover method called when success expected')
        self.assertFalse(mock_abort.called,
                         'abortJob not called when success expected')
        fake_post_method.assert_called_once_with(
            self.context, instance, dest, False, migrate_data)
    else:
        if expect_result == self.EXPECT_ABORT:
            self.assertTrue(mock_abort.called,
                            'abortJob called when abort expected')
        else:
            self.assertFalse(mock_abort.called,
                             'abortJob not called when failure expected')
        self.assertFalse(fake_post_method.called,
                         'Post method called when success not expected')
        if expected_mig_status:
            fake_recover_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data,
                migration_status=expected_mig_status)
        else:
            fake_recover_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data)
def test_live_migration_monitor_success(self):
    """Normal run: NONE, three UNBOUNDED polls, the operation thread
    finishes, the domain stops, and the job reports COMPLETED.
    """
    unbounded = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(host.DomainJobInfo(type=unbounded) for _ in range(3))
    records.extend([
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ])
    self._test_live_migration_monitoring(records, [], self.EXPECT_SUCCESS)
def test_live_migration_monitor_success_race(self):
    """Successful run, but polling is too slow to observe COMPLETED and
    only sees the job back at NONE after the domain stopped.
    """
    unbounded = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(host.DomainJobInfo(type=unbounded) for _ in range(3))
    records.extend([
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ])
    self._test_live_migration_monitoring(records, [], self.EXPECT_SUCCESS)
def test_live_migration_monitor_failed(self):
    """Failed run where the FAILED job state is observed directly; the
    domain never stops since the migration did not complete.
    """
    unbounded = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(host.DomainJobInfo(type=unbounded) for _ in range(3))
    records.extend([
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
    ])
    self._test_live_migration_monitoring(records, [], self.EXPECT_FAILURE)
def test_live_migration_monitor_failed_race(self):
    """Failed run where polling misses the FAILED state and only sees
    the job back at NONE, which (domain still running) means failure.
    """
    unbounded = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(host.DomainJobInfo(type=unbounded) for _ in range(3))
    records.extend([
        "thread-finish",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
    ])
    self._test_live_migration_monitoring(records, [], self.EXPECT_FAILURE)
def test_live_migration_monitor_cancelled(self):
    """Cancelled run: the CANCELLED job state is observed and the
    recover method must be invoked with migration_status='cancelled'.
    """
    unbounded = fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED
    records = [host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_NONE)]
    records.extend(host.DomainJobInfo(type=unbounded) for _ in range(3))
    records.extend([
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ])
    self._test_live_migration_monitoring(records, [],
                                         self.EXPECT_FAILURE,
                                         expected_mig_status='cancelled')
@mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_migration_downtime_steps")
def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                         mock_set_downtime):
    """The monitor applies downtime steps as their deadlines pass.

    Timeouts are set huge so neither the completion nor the progress
    timeout can interfere with the downtime-stepping behaviour.
    """
    self.flags(live_migration_completion_timeout=1000000,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # We've setup 4 fake downtime steps - first value is the
    # time delay, second is the downtime value
    downtime_steps = [
        (90, 10),
        (180, 50),
        (270, 200),
        (500, 300),
    ]
    mock_downtime_steps.return_value = downtime_steps
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    # Times are chosen so that only the first 3 downtime
    # steps are needed.
    fake_times = [0, 1, 30, 95, 150, 200, 300]
    # A normal sequence where see all the normal job states
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
    ]
    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_SUCCESS)
    # Only the first three steps' deadlines (90/180/270) were reached.
    mock_set_downtime.assert_has_calls([mock.call(10),
                                        mock.call(50),
                                        mock.call(200)])
def test_live_migration_monitor_completion(self):
    """Exceeding live_migration_completion_timeout aborts the job.

    The clock passes the 100s completion timeout, so the monitor calls
    abortJob and the run ends as an abort with status 'cancelled'.
    """
    self.flags(live_migration_completion_timeout=100,
               live_migration_progress_timeout=1000000,
               group='libvirt')
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
    # A normal sequence where see all the normal job states
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ]
    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_ABORT,
                                         expected_mig_status='cancelled')
def test_live_migration_monitor_progress(self):
    """Exceeding live_migration_progress_timeout aborts the job.

    With no progress for longer than the 150s progress timeout the
    monitor calls abortJob and the run ends as a cancelled abort.
    """
    self.flags(live_migration_completion_timeout=1000000,
               live_migration_progress_timeout=150,
               group='libvirt')
    # Each one of these fake times is used for time.time()
    # when a new domain_info_records entry is consumed.
    fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320]
    # A normal sequence where see all the normal job states
    domain_info_records = [
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
        "thread-finish",
        "domain-stop",
        host.DomainJobInfo(
            type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
    ]
    self._test_live_migration_monitoring(domain_info_records,
                                         fake_times, self.EXPECT_ABORT,
                                         expected_mig_status='cancelled')
def test_live_migration_downtime_steps(self):
    """Verify the exact (delay, downtime) schedule for 3.0 GB of data.

    With downtime=400ms, 10 steps and a 30s/GB base delay, steps are
    90s apart (30 * 3GB) and ramp up to the configured 400ms maximum.
    """
    self.flags(live_migration_downtime=400, group='libvirt')
    self.flags(live_migration_downtime_steps=10, group='libvirt')
    self.flags(live_migration_downtime_delay=30, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    steps = drvr._migration_downtime_steps(3.0)
    self.assertEqual([
        (0, 37),
        (90, 38),
        (180, 39),
        (270, 42),
        (360, 46),
        (450, 55),
        (540, 70),
        (630, 98),
        (720, 148),
        (810, 238),
        (900, 400),
    ], list(steps))
@mock.patch.object(utils, "spawn")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
@mock.patch.object(host.Host, "get_guest")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_live_migration_copy_disk_paths")
def test_live_migration_main(self, mock_copy_disk_path, mock_running,
                             mock_guest, mock_monitor, mock_thread):
    """_live_migration wires its pieces together correctly.

    It must collect disk paths, spawn the migration operation thread
    with the device names, and start the monitor with the disk paths
    and a fresh eventlet Event shared with the thread.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    dom = fakelibvirt.Domain(drvr._get_connection(),
                             "<domain><name>demo</name></domain>", True)
    guest = libvirt_guest.Guest(dom)
    migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
    disks_to_copy = (['/some/path/one', '/test/path/two'],
                     ['vda', 'vdb'])
    mock_copy_disk_path.return_value = disks_to_copy
    mock_guest.return_value = guest

    def fake_post():
        pass

    def fake_recover():
        pass

    drvr._live_migration(self.context, instance, "fakehost",
                         fake_post, fake_recover, True,
                         migrate_data)
    mock_copy_disk_path.assert_called_once_with(self.context, instance,
                                                guest)

    class AnyEventletEvent(object):
        # Matches any eventlet Event, since the monitor creates its own.
        def __eq__(self, other):
            return type(other) == eventlet.event.Event

    mock_thread.assert_called_once_with(
        drvr._live_migration_operation,
        self.context, instance, "fakehost", True,
        migrate_data, dom, disks_to_copy[1])
    mock_monitor.assert_called_once_with(
        self.context, instance, guest, "fakehost",
        fake_post, fake_recover, True,
        migrate_data, dom, AnyEventletEvent(), disks_to_copy[0])
def _do_test_create_images_and_backing(self, disk_type):
    """Common helper: a disk with no backing file is created locally.

    :param disk_type: image type to exercise ('qcow2' or 'raw').
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
    self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')
    disk_info = {'path': 'foo', 'type': disk_type,
                 'disk_size': 1 * 1024 ** 3,
                 'virt_disk_size': 20 * 1024 ** 3,
                 'backing_file': None}
    # Expect a blank image of the virtual size plus the kernel/ramdisk
    # fetch (with no fallback host).
    libvirt_driver.libvirt_utils.create_image(
        disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()
    # Pretend nothing exists on disk so the create path is taken.
    self.stub_out('os.path.exists', lambda *args: False)
    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", [disk_info])
def test_create_images_and_backing_qcow2(self):
    # qcow2 variant of the shared create-images scenario.
    self._do_test_create_images_and_backing('qcow2')
def test_create_images_and_backing_raw(self):
    # raw variant of the shared create-images scenario.
    self._do_test_create_images_and_backing('raw')
def test_create_images_and_backing_images_not_exist_no_fallback(self):
    """Without a fallback host, a missing backing image propagates the
    ImageNotFound raised by fetch_image.
    """
    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disks = [{u'backing_file': u'fake_image_backing_file',
              u'disk_size': 10747904,
              u'path': u'disk_path',
              u'type': u'qcow2',
              u'virt_disk_size': 25165824}]
    fetch_patch = mock.patch.object(
        libvirt_driver.libvirt_utils, 'fetch_image',
        side_effect=exception.ImageNotFound(image_id="fake_id"))
    with fetch_patch:
        self.assertRaises(exception.ImageNotFound,
                          drvr._create_images_and_backing,
                          self.context, instance,
                          "/fake/instance/dir", disks)
def test_create_images_and_backing_images_not_exist_fallback(self):
    """With a fallback host, images missing from glance are copied from
    that host instead: the backing file, kernel and ramdisk must all be
    pulled over with copy_image(receive=True) after fetch_image fails.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]
    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    with test.nested(
        mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                          side_effect=exception.ImageNotFound(
                              image_id="fake_id")),
    ) as (copy_image_mock, fetch_image_mock):
        conn._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", disk_info,
                                        fallback_from_host="fake_host")
        backfile_path = os.path.join(base_dir, 'fake_image_backing_file')
        kernel_path = os.path.join(CONF.instances_path,
                                   self.test_instance['uuid'],
                                   'kernel')
        ramdisk_path = os.path.join(CONF.instances_path,
                                    self.test_instance['uuid'],
                                    'ramdisk')
        # Every image is pulled from the fallback host...
        copy_image_mock.assert_has_calls([
            mock.call(dest=backfile_path, src=backfile_path,
                      host='fake_host', receive=True),
            mock.call(dest=kernel_path, src=kernel_path,
                      host='fake_host', receive=True),
            mock.call(dest=ramdisk_path, src=ramdisk_path,
                      host='fake_host', receive=True)
        ])
    # ...after glance was tried first for each of them.
    fetch_image_mock.assert_has_calls([
        mock.call(context=self.context,
                  target=backfile_path,
                  image_id=self.test_instance['image_ref'],
                  user_id=self.test_instance['user_id'],
                  project_id=self.test_instance['project_id'],
                  max_size=25165824),
        mock.call(self.context, kernel_path,
                  self.test_instance['kernel_id'],
                  self.test_instance['user_id'],
                  self.test_instance['project_id']),
        mock.call(self.context, ramdisk_path,
                  self.test_instance['ramdisk_id'],
                  self.test_instance['user_id'],
                  self.test_instance['project_id']),
    ])
@mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
@mock.patch.object(os.path, 'exists', return_value=True)
def test_create_images_and_backing_images_exist(self, mock_exists,
                                                mock_fetch_image):
    """When every image already exists on disk nothing is re-fetched."""
    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disks = [{u'backing_file': u'fake_image_backing_file',
              u'disk_size': 10747904,
              u'path': u'disk_path',
              u'type': u'qcow2',
              u'virt_disk_size': 25165824}]
    with mock.patch.object(imagebackend.Image, 'get_disk_size'):
        drvr._create_images_and_backing(self.context, instance,
                                        '/fake/instance/dir', disks)
    self.assertFalse(mock_fetch_image.called)
def test_create_images_and_backing_ephemeral_gets_created(self):
    """Ephemeral backing files are created (not fetched from glance),
    while image-backed disks are fetched; both have their base size
    verified against the disk_info virt_disk_size.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824},
        {u'backing_file': u'ephemeral_1_default',
         u'disk_size': 393216,
         u'over_committed_disk_size': 1073348608,
         u'path': u'disk_eph_path',
         u'type': u'qcow2',
         u'virt_disk_size': 1073741824}]
    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    instance = objects.Instance(**self.test_instance)
    with test.nested(
        mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
        mock.patch.object(drvr, '_create_ephemeral'),
        mock.patch.object(imagebackend.Image, 'verify_base_size'),
        mock.patch.object(imagebackend.Image, 'get_disk_size')
    ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
          create_ephemeral_mock, verify_base_size_mock, disk_size_mock):
        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir",
                                        disk_info)
        # Exactly one ephemeral created, targeting the image cache.
        self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
        m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'ephemeral_1_default'),
            m_kwargs['target'])
        # Exactly one glance fetch, for the image-backed disk.
        self.assertEqual(len(fetch_image_mock.call_args_list), 1)
        m_args, m_kwargs = fetch_image_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'fake_image_backing_file'),
            m_kwargs['target'])
        verify_base_size_mock.assert_has_calls([
            mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                      25165824),
            mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                      1073741824)
        ])
def test_create_images_and_backing_disk_info_none(self):
    """A None disk_info list still fetches kernel/ramdisk and returns
    without error (no disks to create).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()
    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", None)
def _generate_target_ret(self, target_connect_addr=None):
target_ret = {
'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
'target_connect_addr': target_connect_addr,
'serial_listen_addr': '127.0.0.1',
'volume': {
'12345': {'connection_info': {u'data': {'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
'serial': '12345'},
'disk_info': {'bus': 'scsi',
'dev': 'sda',
'type': 'disk'}},
'67890': {'connection_info': {u'data': {'device_path':
u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
'serial': '67890'},
'disk_info': {'bus': 'scsi',
'dev': 'sdb',
'type': 'disk'}}}}
return target_ret
def test_pre_live_migration_works_correctly_mocked(self):
    # Default scenario: no live_migration_inbound_addr configured, so
    # the helper checks against the default expected result.
    self._test_pre_live_migration_works_correctly_mocked()
def test_pre_live_migration_with_transport_ip(self):
    # With live_migration_inbound_addr configured, the result's
    # target_connect_addr must carry that address.
    self.flags(live_migration_inbound_addr='127.0.0.2',
               group='libvirt')
    target_ret = self._generate_target_ret('127.0.0.2')
    self._test_pre_live_migration_works_correctly_mocked(target_ret)
def _test_pre_live_migration_works_correctly_mocked(self,
                                                    target_ret=None):
    """Run pre_live_migration with mocked volumes/VIFs and compare its
    legacy-dict result against target_ret (default expected result from
    _generate_target_ret when not given).
    """
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': {'serial': '12345', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
         'mount_device': '/dev/sda'},
        {'connection_info': {'serial': '67890', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
         'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    class FakeNetworkInfo(object):
        def fixed_ips(self):
            return ["test_ip_addr"]

    def fake_none(*args, **kwargs):
        return

    self.stubs.Set(drvr, '_create_images_and_backing', fake_none)
    instance = objects.Instance(**self.test_instance)
    c = context.get_admin_context()
    nw_info = FakeNetworkInfo()
    # Creating mocks (mox: one _connect_volume per mapped volume)
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
                                         ).AndReturn(vol['block_device_mapping'])
    self.mox.StubOutWithMock(drvr, "_connect_volume")
    for v in vol['block_device_mapping']:
        disk_info = {
            'bus': "scsi",
            'dev': v['mount_device'].rpartition("/")[2],
            'type': "disk"
        }
        drvr._connect_volume(v['connection_info'],
                             disk_info)
    self.mox.StubOutWithMock(drvr, 'plug_vifs')
    drvr.plug_vifs(mox.IsA(instance), nw_info)
    self.mox.ReplayAll()
    migrate_data = {
        "block_migration": False,
        "instance_relative_path": "foo",
        "is_shared_block_storage": False,
        "is_shared_instance_path": False,
    }
    result = drvr.pre_live_migration(
        c, instance, vol, nw_info, None,
        migrate_data=migrate_data)
    if not target_ret:
        target_ret = self._generate_target_ret()
    self.assertEqual(
        result.to_legacy_dict(
            pre_migration_result=True)['pre_live_migration_result'],
        target_ret)
def test_pre_live_migration_block_with_config_drive_mocked(self):
    """An instance that requires a config drive (non-vfat format) is
    refused with NoLiveMigrationForConfigDriveInLibVirt.
    """
    bdi = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    # Force "config drive required" regardless of instance properties.
    self.stubs.Set(configdrive, 'required_by', lambda *a, **kw: True)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ctxt = context.get_admin_context()
    migrate_data = {'is_shared_instance_path': False,
                    'is_shared_block_storage': False,
                    'block_migration': False,
                    'instance_relative_path': 'foo'}
    self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                      drvr.pre_live_migration, ctxt, instance, bdi, None,
                      None, migrate_data)
@mock.patch('nova.virt.driver.block_device_info_get_mapping',
            return_value=())
@mock.patch('nova.virt.configdrive.required_by',
            return_value=True)
def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
        self, mock_required_by, block_device_info_get_mapping):
    """Unlike other formats, a vfat config drive does NOT block live
    migration: pre_live_migration succeeds and returns the expected
    (volume-less) legacy result.
    """
    self.flags(config_drive_format='vfat')
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    res_data = drvr.pre_live_migration(
        self.context, instance, vol, [], None,
        {'is_shared_instance_path': False,
         'is_shared_block_storage': False,
         'block_migration': False,
         'instance_relative_path': 'foo'})
    res_data = res_data.to_legacy_dict(pre_migration_result=True)
    block_device_info_get_mapping.assert_called_once_with(
        {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
        ]}
    )
    # Mapping was mocked to () so no volume entries are expected.
    self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                'vnc': '127.0.0.1'},
                      'target_connect_addr': None,
                      'serial_listen_addr': '127.0.0.1',
                      'volume': {}}, res_data['pre_live_migration_result'])
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
    """Volume-backed pre_live_migration connects both volumes, returns
    the expected legacy result, and creates the instance directory
    under the (temporary) instances_path.
    """
    # Creating testdata, using temp dir.
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        inst_ref = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()
        # Creating mocks (mox: one _connect_volume per mapped volume)
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
            }
            drvr._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
        self.mox.ReplayAll()
        migrate_data = {'is_shared_instance_path': False,
                        'is_shared_block_storage': False,
                        'is_volume_backed': True,
                        'block_migration': False,
                        'instance_relative_path': inst_ref['name'],
                        'disk_over_commit': False,
                        'disk_available_mb': 123,
                        'image_type': 'qcow2',
                        'filename': 'foo',
                        }
        ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                      migrate_data)
        target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'target_connect_addr': None,
            'serial_listen_addr': '127.0.0.1',
            'volume': {
                '12345': {'connection_info': {u'data': {'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                          'serial': '12345'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sda',
                                        'type': 'disk'}},
                '67890': {'connection_info': {u'data': {'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                          'serial': '67890'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sdb',
                                        'type': 'disk'}}}}
        self.assertEqual(
            ret.to_legacy_dict(True)['pre_live_migration_result'],
            target_ret)
        # The per-instance directory must have been created on disk.
        self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                   inst_ref['name'])))
def test_pre_live_migration_plug_vifs_retry_fails(self):
    """If plugging VIFs fails on every retry, the final
    ProcessExecutionError is raised to the caller.
    """
    self.flags(live_migration_retry_count=3)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    def always_fail(instance, network_info):
        raise processutils.ProcessExecutionError()

    self.stubs.Set(drvr, 'plug_vifs', always_fail)
    # Don't actually sleep between retries.
    self.stubs.Set(eventlet.greenthread, 'sleep',
                   lambda x: eventlet.sleep(0))
    self.assertRaises(processutils.ProcessExecutionError,
                      drvr.pre_live_migration,
                      self.context, instance, block_device_info=None,
                      network_info=[], disk_info=jsonutils.dumps({}))
def test_pre_live_migration_plug_vifs_retry_works(self):
self.flags(live_migration_retry_count=3)
called = {'count': 0}
instance = objects.Instance(**self.test_instance)
def fake_plug_vifs(instance, network_info):
called['count'] += 1
if called['count'] < CONF.live_migration_retry_count:
raise processutils.ProcessExecutionError()
else:
return
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(eventlet.greenthread, 'sleep',
lambda x: eventlet.sleep(0))
disk_info_json = jsonutils.dumps({})
drvr.pre_live_migration(self.context, instance, block_device_info=None,
network_info=[], disk_info=disk_info_json)
def test_pre_live_migration_image_not_created_with_shared_storage(self):
migrate_data_set = [{'is_shared_block_storage': False,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': False},
{'is_shared_block_storage': True,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': False},
{'is_shared_block_storage': False,
'is_shared_instance_path': True,
'is_volume_backed': False,
'filename': 'foo',
'instance_relative_path': 'bar',
'disk_over_commit': False,
'disk_available_mb': 123,
'image_type': 'qcow2',
'block_migration': True}]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing'),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
for migrate_data in migrate_data_set:
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
self.assertFalse(create_image_mock.called)
self.assertIsInstance(res,
objects.LibvirtLiveMigrateData)
def test_pre_live_migration_with_not_shared_instance_path(self):
migrate_data = {'is_shared_block_storage': False,
'is_shared_instance_path': False,
'block_migration': False,
'instance_relative_path': 'foo'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
def check_instance_dir(context, instance,
instance_dir, disk_info,
fallback_from_host=False):
self.assertTrue(instance_dir)
# creating mocks
with test.nested(
mock.patch.object(drvr,
'_create_images_and_backing',
side_effect=check_instance_dir),
mock.patch.object(drvr,
'ensure_filtering_rules_for_instance'),
mock.patch.object(drvr, 'plug_vifs'),
) as (
create_image_mock,
rules_mock,
plug_mock,
):
disk_info_json = jsonutils.dumps({})
res = drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=disk_info_json,
migrate_data=migrate_data)
create_image_mock.assert_has_calls(
[mock.call(self.context, instance, mock.ANY, {},
fallback_from_host=instance.host)])
self.assertIsInstance(res, objects.LibvirtLiveMigrateData)
def test_pre_live_migration_recreate_disk_info(self):
migrate_data = {'is_shared_block_storage': False,
'is_shared_instance_path': False,
'block_migration': True,
'instance_relative_path': '/some/path/'}
disk_info = [{'disk_size': 5368709120, 'type': 'raw',
'virt_disk_size': 5368709120,
'path': '/some/path/disk',
'backing_file': '', 'over_committed_disk_size': 0},
{'disk_size': 1073741824, 'type': 'raw',
'virt_disk_size': 1073741824,
'path': '/some/path/disk.eph0',
'backing_file': '', 'over_committed_disk_size': 0}]
image_disk_info = {'/some/path/disk': 'raw',
'/some/path/disk.eph0': 'raw'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance_path = os.path.dirname(disk_info[0]['path'])
disk_info_path = os.path.join(instance_path, 'disk.info')
with test.nested(
mock.patch.object(os, 'mkdir'),
mock.patch.object(fake_libvirt_utils, 'write_to_file'),
mock.patch.object(drvr, '_create_images_and_backing')
) as (
mkdir, write_to_file, create_images_and_backing
):
drvr.pre_live_migration(self.context, instance,
block_device_info=None,
network_info=[],
disk_info=jsonutils.dumps(disk_info),
migrate_data=migrate_data)
write_to_file.assert_called_with(disk_info_path,
jsonutils.dumps(image_disk_info))
    def test_get_instance_disk_info_works_correctly(self):
        """get_instance_disk_info reports size and backing-file data for
        every local disk defined in the domain XML.
        """
        # Test data: a raw root disk plus a qcow2 local disk.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks: the domain hands back the XML above.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            # Only this instance resolves to the mocked domain.
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))
        # Canned 'qemu-img info' output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")
        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS,
                      ).AndReturn((ret, ''))
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        # virtual size (21474836480) minus on-disk size (3328599655).
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
    def test_post_live_migration(self):
        """Source-side cleanup after live migration disconnects every
        attached volume using re-initialized connection info.
        """
        vol = {'block_device_mapping': [
                  {'connection_info': {
                      'data': {'multipath_id': 'dummy1'},
                      'serial': 'fake_serial1'},
                   'mount_device': '/dev/sda',
                   },
                  {'connection_info': {
                      'data': {},
                      'serial': 'fake_serial2'},
                   'mount_device': '/dev/sdb', }]}
        def fake_initialize_connection(context, volume_id, connector):
            # Cinder returns empty connection data for every volume.
            return {'data': {}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_connector = {'host': 'fake'}
        inst_ref = {'id': 'foo'}
        cntx = context.get_admin_context()
        # Set up the mock expectations
        with test.nested(
            mock.patch.object(driver, 'block_device_info_get_mapping',
                              return_value=vol['block_device_mapping']),
            mock.patch.object(drvr, "get_volume_connector",
                              return_value=fake_connector),
            mock.patch.object(drvr._volume_api, "initialize_connection",
                              side_effect=fake_initialize_connection),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (block_device_info_get_mapping, get_volume_connector,
              initialize_connection, _disconnect_volume):
            drvr.post_live_migration(cntx, inst_ref, vol)
            block_device_info_get_mapping.assert_has_calls([
                mock.call(vol)])
            get_volume_connector.assert_has_calls([
                mock.call(inst_ref)])
            # The multipath_id from the original connection_info must be
            # preserved even though initialize_connection returned no data.
            _disconnect_volume.assert_has_calls([
                mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'),
                mock.call({'data': {}}, 'sdb')])
    def test_get_instance_disk_info_excludes_volumes(self):
        """Disks attached via the block device mapping (cinder volumes)
        are excluded from get_instance_disk_info output.
        """
        # Test data: two local disks plus two volume-backed disks that
        # appear in block_device_info and must be filtered out.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks: the domain hands back the XML above.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            # Only this instance resolves to the mocked domain.
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))
        # Canned 'qemu-img info' output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")
        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS,
                      ).AndReturn((ret, ''))
        self.mox.ReplayAll()
        # vdc/vdd are volumes, so only vda/vdb may be reported.
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        # virtual size (21474836480) minus on-disk size (3328599655).
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
    def test_get_instance_disk_info_no_bdinfo_passed(self):
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
        # without access to Nova's block device information. We want to make
        # sure that we guess volumes mostly correctly in that case as well
        instance = objects.Instance(**self.test_instance)
        # One file-backed local disk and one block-backed disk; the latter
        # should be guessed to be a volume and excluded from the result.
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks: the domain hands back the XML above.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            # Only this instance resolves to the mocked domain.
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        # Only the file-backed disk is reported.
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
    def test_spawn_with_network_info(self):
        """spawn() succeeds with network info supplied, on a libvirt
        connection lacking VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES.
        """
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return fakelibvirt.FAKE_LIBVIRT_VERSION

        def fake_getCapabilities():
            # Minimal host capabilities XML for the guest config code.
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Mock out the get_info method of the LibvirtDriver so that the polling
        # in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
                                              ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

        # Start test
        self.mox.ReplayAll()

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            # Exercise the code path taken when the libvirt module lacks
            # this baseline-CPU capability flag.
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(imagebackend.Image,
                           'cache',
                           fake_none)

            drvr.spawn(self.context, instance, image_meta, [], 'herp',
                       network_info=network_info)

        # Clean up any instance/cache directories the spawn created.
        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)

        path = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.image_cache_subdirectory_name))
def test_spawn_without_image_meta(self):
self.create_image_called = False
def fake_none(*args, **kwargs):
return
def fake_create_image(*args, **kwargs):
self.create_image_called = True
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(drvr, '_create_image', fake_create_image)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr.spawn(self.context, instance,
image_meta, [], None)
self.assertTrue(self.create_image_called)
drvr.spawn(self.context, instance,
image_meta, [], None)
self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
self.cache_called_for_disk = False
def fake_none(*args, **kwargs):
return
def fake_cache(*args, **kwargs):
if kwargs.get('image_id') == 'my_fake_image':
self.cache_called_for_disk = True
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
block_device_info = {'root_device_name': '/dev/vda',
'block_device_mapping': [
{'mount_device': 'vda',
'boot_index': 0}
]
}
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# Volume-backed instance created without image
instance_ref = self.test_instance
instance_ref['image_ref'] = ''
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
image_meta, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
# Booted from volume but with placeholder image
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['root_device_name'] = '/dev/vda'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
image_meta, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
# Booted from an image
instance_ref['image_ref'] = 'my_fake_image'
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
image_meta, [], None)
self.assertTrue(self.cache_called_for_disk)
    def test_start_lxc_from_volume(self):
        """Booting an LXC guest from a volume sets up the container on
        the volume's local block device and records the rootfs device.
        """
        self.flags(virt_type="lxc",
                   group='libvirt')

        def check_setup_container(image, container_dir=None):
            # The container must be built from the connected volume's
            # local block device path.
            self.assertIsInstance(image, imgmodel.LocalBlockImage)
            self.assertEqual(image.path, '/dev/path/to/dev')
            return '/dev/nbd1'

        bdm = {
                  'guest_format': None,
                  'boot_index': 0,
                  'mount_device': '/dev/sda',
                  'connection_info': {
                      'driver_volume_type': 'iscsi',
                      'serial': 'afc1',
                      'data': {
                          'access_mode': 'rw',
                          'target_discovered': False,
                          'encrypted': False,
                          'qos_specs': None,
                          'target_iqn': 'iqn: volume-afc1',
                          'target_portal': 'ip: 3260',
                          'volume_id': 'afc1',
                          'target_lun': 1,
                          'auth_password': 'uj',
                          'auth_username': '47',
                          'auth_method': 'CHAP'
                      }
                  },
                  'disk_bus': 'scsi',
                  'device_type': 'disk',
                  'delete_on_termination': False
              }

        def _connect_volume_side_effect(connection_info, disk_info):
            # Connecting the volume is what makes the local device path
            # appear in the connection info.
            bdm['connection_info']['data']['device_path'] = '/dev/path/to/dev'

        def _get(key, opt=None):
            return bdm.get(key, opt)

        def getitem(key):
            return bdm[key]

        def setitem(key, val):
            bdm[key] = val

        # A MagicMock standing in for a BlockDeviceMapping object, backed
        # by the plain bdm dict above.
        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get

        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'

        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}

        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        inst_obj = objects.Instance(**instance_ref)
        image_meta = objects.ImageMeta.from_dict({})

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume',
                              side_effect=_connect_volume_side_effect),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                              state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):

            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            # The device returned by setup_container is persisted as the
            # container's rootfs device.
            self.assertEqual('/dev/nbd1',
                             inst_obj.system_metadata.get(
                             'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
def fake_none(*args, **kwargs):
return None
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
class FakeLibvirtPciDevice(object):
def dettach(self):
return None
def reset(self):
return None
def fake_node_device_lookup_by_name(address):
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
% dict(hex='[\da-f]', oct='[0-8]'))
pattern = re.compile(pattern)
if pattern.match(address) is None:
raise fakelibvirt.libvirtError()
return FakeLibvirtPciDevice()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(drvr, '_create_image', fake_none)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
drvr._conn.nodeDeviceLookupByName = \
fake_node_device_lookup_by_name
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance = objects.Instance(**instance_ref)
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr.spawn(self.context, instance,
image_meta, [], None)
    def test_chown_disk_config_for_instance(self):
        """disk.config ownership is changed to the current user when the
        file exists in the instance directory.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
        # Expected call sequence: resolve path, check existence, chown.
        fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
        os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
        fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())
        self.mox.ReplayAll()
        drvr._chown_disk_config_for_instance(instance)
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        """Drive _create_image with a fake image backend and verify which
        cache files (root + ephemeral) it requests.

        :param os_type: instance os_type, drives ephemeral fs selection
        :param filename: expected ephemeral cache filename
        :param mkfs: register a mkfs command for os_type when True
        """
        gotFiles = []

        def fake_image(self, instance, name, image_type=''):
            # Fake backend factory capturing cache() requests in gotFiles.
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def resize_image(self, size):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        if mkfs:
            self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): passes the nova.context module rather than
        # self.context — the fake backend never touches it; confirm intended.
        drvr._create_image(context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        # Expected: sha1-named root disk cache plus the ephemeral disk.
        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
self._test_create_image_plain(os_type='',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_none(self):
self._test_create_image_plain(os_type=None,
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_no_fs(self):
self._test_create_image_plain(os_type='test',
filename=self._EPHEMERAL_20_DEFAULT,
mkfs=False)
def test_create_image_plain_os_type_set_with_fs(self):
ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])
self._test_create_image_plain(os_type='test',
filename=ephemeral_file_name,
mkfs=True)
@mock.patch('nova.virt.libvirt.driver.imagecache')
def test_create_image_initrd(self, mock_imagecache):
INITRD = self._EPHEMERAL_20_DEFAULT + '.initrd'
KERNEL = 'vmlinuz.' + self._EPHEMERAL_20_DEFAULT
mock_imagecache.get_cache_fname.side_effect = \
[KERNEL,
INITRD,
self._EPHEMERAL_20_DEFAULT + '.img']
filename = self._EPHEMERAL_20_DEFAULT
gotFiles = []
outer = self
def fake_image(self, instance, name, image_type=''):
class FakeImage(imagebackend.Image):
def __init__(self, instance, name, is_block_dev=False):
self.path = os.path.join(instance['name'], name)
self.is_block_dev = is_block_dev
def create_image(self, prepare_template, base,
size, *args, **kwargs):
pass
def cache(self, fetch_func, filename, size=None,
*args, **kwargs):
gotFiles.append({'filename': filename,
'size': size})
if filename == INITRD:
outer.assertEqual(fetch_func,
fake_libvirt_utils.fetch_raw_image)
if filename == KERNEL:
outer.assertEqual(fetch_func,
fake_libvirt_utils.fetch_raw_image)
def resize_image(self, size):
pass
def snapshot(self, name):
pass
return FakeImage(instance, name)
instance_ref = self.test_instance
instance_ref['image_ref'] = 1
instance_ref['kernel_id'] = 2
instance_ref['ramdisk_id'] = 3
instance_ref['os_type'] = 'test'
instance = objects.Instance(**instance_ref)
driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(nova.virt.libvirt.imagebackend.Backend,
"image", fake_image),
mock.patch.object(driver, '_get_guest_xml'),
mock.patch.object(driver, '_create_domain_and_network'),
mock.patch.object(driver, 'get_info',
return_value=[hardware.InstanceInfo(state=power_state.RUNNING)])
):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
driver._create_image(context, instance, disk_info['mapping'])
wantFiles = [
{'filename': KERNEL,
'size': None},
{'filename': INITRD,
'size': None},
{'filename': self._EPHEMERAL_20_DEFAULT + '.img',
'size': 10 * units.Gi},
{'filename': filename,
'size': 20 * units.Gi},
]
self.assertEqual(wantFiles, gotFiles)
    def _create_image_helper(self, callback, suffix=''):
        """Drive _create_image with a fake backend and report both the
        cache files requested and the files imported into the backend.

        :param callback: mutates the instance dict before the run
        :param suffix: suffix forwarded to _create_image (e.g. '.rescue')
        :returns: tuple of (gotFiles, imported_files)
        """
        gotFiles = []
        imported_files = []

        def fake_image(self, instance, name, image_type=''):
            # Fake backend factory recording cache() and import_file().
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev

                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass

                def resize_image(self, size):
                    pass

                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})

                def import_file(self, instance, local_filename,
                                remote_filename):
                    imported_files.append((local_filename, remote_filename))

                def snapshot(self, name):
                    pass

            return FakeImage(instance, name)

        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        # NOTE(mikal): use this callback to tweak the instance to match
        # what you're trying to test
        callback(instance_ref)
        instance = objects.Instance(**instance_ref)
        # Turn on some swap to exercise that codepath in _create_image
        instance.flavor.swap = 500

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none)
        self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
                       'make_drive', fake_none)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): passes the nova.context module rather than
        # self.context — the fake backend never touches it; confirm intended.
        drvr._create_image(context, instance, disk_info['mapping'],
                           suffix=suffix)
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)

        return gotFiles, imported_files
def test_create_image_with_swap(self):
def enable_swap(instance_ref):
# Turn on some swap to exercise that codepath in _create_image
instance_ref['system_metadata']['instance_type_swap'] = 500
gotFiles, _ = self._create_image_helper(enable_swap)
wantFiles = [
{'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
'size': 10 * units.Gi},
{'filename': self._EPHEMERAL_20_DEFAULT,
'size': 20 * units.Gi},
{'filename': 'swap_500',
'size': 500 * units.Mi},
]
self.assertEqual(gotFiles, wantFiles)
def test_create_image_with_configdrive(self):
def enable_configdrive(instance_ref):
instance_ref['config_drive'] = 'true'
# Ensure that we create a config drive and then import it into the
# image backend store
_, imported_files = self._create_image_helper(enable_configdrive)
self.assertTrue(imported_files[0][0].endswith('/disk.config'))
self.assertEqual('disk.config', imported_files[0][1])
def test_create_image_with_configdrive_rescue(self):
def enable_configdrive(instance_ref):
instance_ref['config_drive'] = 'true'
# Ensure that we create a config drive and then import it into the
# image backend store
_, imported_files = self._create_image_helper(enable_configdrive,
suffix='.rescue')
self.assertTrue(imported_files[0][0].endswith('/disk.config.rescue'))
self.assertEqual('disk.config.rescue', imported_files[0][1])
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
side_effect=exception.ImageNotFound(image_id='fake-id'))
def test_create_image_not_exist_no_fallback(self, mock_cache):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
self.assertRaises(exception.ImageNotFound,
drvr._create_image,
self.context, instance, disk_info['mapping'])
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):
        """When the image is gone from glance and a fallback host is
        given, _create_image copies the image from that host.
        """
        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            # First cache() call: simulate a glance miss, then rewire the
            # mock so the retry drives the fallback fetch function.
            def second_call(fetch_func, filename, size=None, *args, **kwargs):
                # call copy_from_host ourselves because we mocked image.cache()
                fetch_func('fake-target', 'fake-max-size')
                # further calls have no side effect
                mock_cache.side_effect = None
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')
        mock_cache.side_effect = side_effect
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            # The image must be pulled from the fallback host.
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_image_resize_snap_backend(self, mock_cache):
    """While finishing a resize, _create_image must snapshot the disk
    backend (so a resize-revert can roll back to it).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.image_backend = mock.Mock()
    drvr.image_backend.image.return_value = drvr.image_backend
    instance = objects.Instance(**self.test_instance)
    # RESIZE_FINISH is the task state that triggers snapshot creation.
    instance.task_state = task_states.RESIZE_FINISH
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)

    with mock.patch.object(drvr.image_backend, 'create_snap') as mock_crt:
        drvr._create_image(self.context, instance, disk_info['mapping'])
        mock_crt.assert_called_once_with(
            libvirt_utils.RESIZE_SNAPSHOT_NAME)
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
    """An explicit specified_fs overrides CONF.default_ephemeral_format
    when formatting a block-device ephemeral disk.
    """
    self.flags(default_ephemeral_format='ext3')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20,
                           specified_fs='ext4')
    # ext4 (the per-BDM choice) wins over the ext3 config default.
    mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                      'myVol', '/dev/something',
                                      run_as_root=True)
def test_create_ephemeral_specified_fs_not_valid(self):
    """An unsupported guest_format ('dummy') on an ephemeral BDM makes
    _create_image raise InvalidBDMFormat.
    """
    CONF.set_override('default_ephemeral_format', 'ext4')
    ephemerals = [{'device_type': 'disk',
                   'disk_bus': 'virtio',
                   'device_name': '/dev/vdb',
                   'guest_format': 'dummy',
                   'size': 1}]
    block_device_info = {
        'ephemerals': ephemerals}
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    # Drop the default local disk so only the BDM-described ephemeral
    # (with its bogus format) is processed.
    disk_info['mapping'].pop('disk.local')

    with test.nested(
        mock.patch.object(utils, 'execute'),
        mock.patch.object(drvr, 'get_info'),
        mock.patch.object(drvr, '_create_domain_and_network'),
        mock.patch.object(imagebackend.Image, 'verify_base_size'),
        mock.patch.object(imagebackend.Image, 'get_disk_size')):
        # NOTE(review): this passes the module-level `context` (not
        # self.context); it works because the exception is raised before
        # the context is used — confirm before relying on it elsewhere.
        self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                          context, instance, disk_info['mapping'],
                          block_device_info=block_device_info)
def test_create_ephemeral_default(self):
    """With no explicit filesystem, a block-dev ephemeral is formatted
    with the recorded mkfs command (mox replay verifies the exact call).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20)
def test_create_ephemeral_with_conf(self):
    """CONF.default_ephemeral_format drives the mkfs filesystem type
    when the caller does not specify one.
    """
    CONF.set_override('default_ephemeral_format', 'ext4')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_arbitrary(self):
    """A custom per-OS _MKFS_COMMAND template is expanded and executed
    for the ephemeral disk.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Override the mkfs template table for the 'linux' os_type.
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_ext3(self):
    """Same as the arbitrary-template test, but exercising an ext3
    mkfs command template.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()
    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_swap_default(self):
    """_create_swap runs mkswap on the target device (not as root)."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkswap', '/dev/something', run_as_root=False)
    self.mox.ReplayAll()
    drvr._create_swap('/dev/something', 1, max_size=20)
def test_get_console_output_file(self):
    """Console output from a file-backed console is truncated to the
    last MAX_CONSOLE_BYTES bytes of the log.
    """
    fake_libvirt_utils.files['console.log'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        console_log = '%s/console.log' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            # Shrink the cap so truncation is observable on 11 bytes.
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            with mock.patch('os.path.exists', return_value=True):
                output = drvr.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        # Only the tail of '01234567890' survives the 5-byte cap.
        self.assertEqual('67890', output)
def test_get_console_output_file_missing(self):
    """A missing console log file yields empty output rather than an
    error.
    """
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_log = os.path.join(tmpdir, instance['name'],
                                   'non-existent.log')
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # os.path.exists -> False makes the driver treat the log as absent.
        with mock.patch('os.path.exists', return_value=False):
            output = drvr.get_console_output(self.context, instance)

        self.assertEqual('', output)
def test_get_console_output_pty(self):
    """Console output read from a pty source is truncated to the last
    MAX_CONSOLE_BYTES bytes.

    The pty flush/append helpers are stubbed out so no real pty I/O
    happens; the fake data comes from fake_libvirt_utils.files.
    """
    fake_libvirt_utils.files['pty'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        pty_file = '%s/fake_pty' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='pty'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % pty_file

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        def _fake_flush(self, fake_pty):
            return 'foo'

        def _fake_append_to_file(self, data, fpath):
            return 'pty'

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
        # Bug fix: these two were bare class-attribute assignments that
        # were never undone, leaking the fakes into every test that runs
        # afterwards.  Use self.stubs.Set (as the rest of this file does)
        # so the real methods are restored automatically on tearDown.
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_flush_libvirt_console', _fake_flush)
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_append_to_file', _fake_append_to_file)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            # Shrink the cap so truncation is observable on 11 bytes.
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            output = drvr.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        self.assertEqual('67890', output)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
@mock.patch.object(libvirt_guest.Guest, "get_xml_desc")
def test_get_console_output_not_available(self, mock_get_xml, get_domain):
    """An unrecognised console type ('foo') makes get_console_output
    raise ConsoleNotAvailable.
    """
    xml = """
    <domain type='kvm'>
        <devices>
            <disk type='file'>
                <source file='filename'/>
            </disk>
            <console type='foo'>
                <source path='srcpath'/>
                <target port='0'/>
            </console>
        </devices>
    </domain>
    """
    mock_get_xml.return_value = xml
    get_domain.return_value = mock.MagicMock()
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleNotAvailable,
                      drvr.get_console_output, self.context, instance)
def test_get_host_ip_addr(self):
    """The driver reports CONF.my_ip as the host's IP address."""
    driver_obj = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    host_ip = driver_obj.get_host_ip_addr()
    self.assertEqual(host_ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warn')
@mock.patch('nova.compute.utils.get_machine_ips')
def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
    """If CONF.my_ip is not among the machine's interface addresses,
    get_host_ip_addr logs a warning listing the interfaces it found.
    """
    mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.get_host_ip_addr()
    mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                     u'not found on any of the '
                                     u'interfaces: %(ifaces)s',
                                     {'ifaces': '8.8.8.8, 75.75.75.75',
                                      'my_ip': mock.ANY})
def test_conn_event_handler(self):
    """A connection failure during init_host raises HypervisorUnavailable
    and the compute service is marked disabled.
    """
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        # verify that the driver registers for the close callback
        # and re-connects after receiving the callback
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.init_host,
                          "wibble")
        # NOTE(review): a Mock attribute is always truthy, so this only
        # verifies anything if the driver actually replaced `disabled`
        # with True; consider assertIs(True, service_mock.disabled).
        self.assertTrue(service_mock.disabled)
def test_command_with_broken_connection(self):
    """After init_host on a broken connection, driver calls raise
    HypervisorUnavailable and the service is marked disabled.
    """
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.get_num_instances)
        # NOTE(review): a Mock attribute is always truthy; this passes
        # regardless unless the driver replaced `disabled` with True —
        # consider assertIs(True, service_mock.disabled).
        self.assertTrue(service_mock.disabled)
def test_service_resume_after_broken_connection(self):
    """Once the connection works again, a previously-disabled service is
    re-enabled (disabled False, disabled_reason cleared).
    """
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    service_mock = mock.MagicMock()
    # Start from the "auto-disabled by earlier failure" state.
    service_mock.disabled.return_value = True
    with test.nested(
        mock.patch.object(drvr._host, "_connect",
                          return_value=mock.MagicMock()),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        drvr.get_num_instances()
        self.assertTrue(not service_mock.disabled and
                        service_mock.disabled_reason is None)
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save):
    """destroy() on an instance with no libvirt domain still cleans up
    and saves the instance exactly once.
    """
    def fake_get_domain(instance):
        raise exception.InstanceNotFound(instance_id=instance.uuid)

    def fake_delete_instance_files(instance):
        pass

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)

    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, {})
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                               mock_disconnect_volume,
                               mock_delete_instance_files, mock_destroy,
                               mock_inst_save, mock_inst_obj_load_attr,
                               mock_get_by_uuid, volume_fail=False):
    """Shared body for the destroy-removes-disk tests: destroy() with a
    volume attached, optionally simulating a volume disconnect problem.
    """
    instance = objects.Instance(self.context, **self.test_instance)
    vol = {'block_device_mapping': [
           {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    mock_mapping.return_value = vol['block_device_mapping']
    mock_delete_instance_files.return_value = True
    mock_get_by_uuid.return_value = instance
    if volume_fail:
        # NOTE(review): this sets return_value to an exception *instance*,
        # so nothing is actually raised; `side_effect` was probably
        # intended — confirm destroy() tolerates the raise before fixing.
        mock_disconnect_volume.return_value = (
            exception.VolumeNotFound('vol'))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
    """Happy-path destroy with an attached volume."""
    self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
    """Destroy still completes when volume disconnect misbehaves."""
    self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
                                  mock_unplug_vifs):
    """destroy() with destroy_disks=False must not touch instance files
    (delete_instance_files is deliberately left unmocked).
    """
    instance = fake_instance.fake_instance_obj(
        None, name='instancename', id=1,
        uuid='875a8070-d0b9-4949-8b31-104d125c9a64')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.destroy(self.context, instance, [], None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                              mock_teardown_container,
                                              mock_cleanup):
    """For virt_type=lxc, destroy() tears down the container after the
    domain has been destroyed.
    """
    self.flags(virt_type='lxc', group='libvirt')
    fake_domain = FakeVirtDomain()

    def destroy_side_effect(*args, **kwargs):
        # Flip the fake domain to SHUTDOWN so destroy() sees it stopped.
        fake_domain._info[0] = power_state.SHUTDOWN

    with mock.patch.object(fake_domain, 'destroy',
                           side_effect=destroy_side_effect) as mock_domain_destroy:
        mock_get_domain.return_value = fake_domain
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_domain_destroy.assert_called_once_with()
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
        mock_get_domain, mock_teardown_container, mock_cleanup):
    """For lxc, the container is torn down even when no libvirt domain
    exists any more (InstanceNotFound from get_domain).
    """
    self.flags(virt_type='lxc', group='libvirt')
    instance = objects.Instance(**self.test_instance)
    inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
    mock_get_domain.side_effect = inf_exception

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    network_info = []
    drvr.destroy(self.context, instance, network_info, None, False)

    mock_get_domain.assert_has_calls([mock.call(instance),
                                      mock.call(instance)])
    mock_teardown_container.assert_called_once_with(instance)
    mock_cleanup.assert_called_once_with(self.context, instance,
                                         network_info, None, False,
                                         None)
def test_reboot_different_ids(self):
    """A soft reboot is considered successful when the domain ID changes
    (the guest restarted itself), after which the domain is re-created.

    NOTE: mox records a strict call sequence on the domain mock; the
    info()/ID() ordering below must match the driver's polling exactly.
    """
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_create_called = False

    # Mock domain
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    mock_domain.info().AndReturn(
        (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown()
    mock_domain.info().AndReturn(
        (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
    # A different ID on the second poll signals the guest rebooted.
    mock_domain.ID().AndReturn('some_other_fake_id')
    mock_domain.ID().AndReturn('some_other_fake_id')

    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock_domain

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, '_create_domain', fake_create_domain)
    self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                   lambda *a, **k: FakeLoopingCall())
    self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
    drvr.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_create_called)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                         mock_sleep, mock_loopingcall,
                         mock_get_instance_pci_devs):
    """If the domain ID never changes within the soft-reboot timeout,
    the driver falls back to a hard reboot.
    """
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_hard_reboot_called = False

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                     (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
    mock_domain.info.side_effect = return_values
    # Same ID on every poll -> the guest never actually restarted.
    mock_domain.ID.return_value = 'some_fake_id'
    mock_domain.shutdown.side_effect = mock.Mock()

    def fake_hard_reboot(*args, **kwargs):
        self.reboot_hard_reboot_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_hard_reboot.side_effect = fake_hard_reboot
    mock_loopingcall.return_value = FakeLoopingCall()
    mock_get_instance_pci_devs.return_value = []
    drvr.reboot(None, instance, [], 'SOFT')

    self.assertTrue(self.reboot_hard_reboot_called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                       mock_hard_reboot):
    """A libvirtError during soft reboot falls back to a hard reboot."""
    # Tests that a hard reboot is performed when a soft reboot results
    # in raising a libvirtError.
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')

    # setup mocks
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = (
        (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_virDomain.ID.return_value = 'some_fake_id'
    mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    context = None
    instance = objects.Instance(**self.test_instance)
    network_info = []
    mock_get_domain.return_value = mock_virDomain

    drvr.reboot(context, instance, network_info, 'SOFT')
    # Bug fix: the fallback was never asserted, so the test passed even
    # if no hard reboot happened.  Verify the mocked _hard_reboot ran.
    self.assertTrue(mock_hard_reboot.called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def _test_resume_state_on_host_boot_with_state(self, state,
                                               mock_get_domain,
                                               mock_hard_reboot):
    """Shared body: resume_state_on_host_boot must hard-reboot the guest
    unless its power state is in the ignored set below.
    """
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = ([state, None, None, None, None])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.return_value = mock_virDomain
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self, 1)

    drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                   block_device_info=None)

    # States that need no reboot: already running/paused/suspended or
    # indeterminate.
    ignored_states = (power_state.RUNNING,
                      power_state.SUSPENDED,
                      power_state.NOSTATE,
                      power_state.PAUSED)
    self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
def test_resume_state_on_host_boot_with_running_state(self):
    """RUNNING guests are left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
    """SUSPENDED guests are left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
    """PAUSED guests are left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
    """NOSTATE guests are left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
    """SHUTDOWN guests must be hard-rebooted on host boot.

    Bug fix: this previously passed power_state.RUNNING (a copy-paste of
    the running-state test), so the SHUTDOWN branch was never exercised.
    """
    self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
def test_resume_state_on_host_boot_with_crashed_state(self):
    """CRASHED guests are hard-rebooted on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
        self, mock_get_domain, mock_hard_reboot):
    """If the domain is gone entirely, resume_state_on_host_boot
    hard-reboots to re-create it.
    """
    instance = objects.Instance(**self.test_instance)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.side_effect = exception.InstanceNotFound(
        instance_id='fake')
    drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
                                   block_device_info=None)

    mock_hard_reboot.assert_called_once_with(self.context,
                                             instance, [], None)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot(self, mock_destroy, mock_get_disk_info,
                     mock_get_instance_disk_info, mock_get_guest_xml,
                     mock_create_images_and_backing,
                     mock_create_domain_and_network, mock_get_info):
    """_hard_reboot destroys and re-creates the domain, routing the two
    different disk_info values to the right collaborators (see the
    inline comments on the assertions).
    """
    self.context.auth_token = True  # any non-None value will suffice
    instance = objects.Instance(**self.test_instance)
    instance_path = libvirt_utils.get_instance_path(instance)
    network_info = _fake_network_info(self, 1)
    block_device_info = None

    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # First poll: still shutting down; second poll: back up.
    return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                     hardware.InstanceInfo(state=power_state.RUNNING)]
    mock_get_info.side_effect = return_values

    backing_disk_info = [{"virt_disk_size": 2}]

    mock_get_disk_info.return_value = mock.sentinel.disk_info
    mock_get_guest_xml.return_value = dummyxml
    mock_get_instance_disk_info.return_value = backing_disk_info

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # make sure that _create_images_and_backing is passed the disk_info
    # returned from _get_instance_disk_info and not the one that is in
    # scope from blockinfo.get_disk_info
    mock_create_images_and_backing.assert_called_once_with(self.context,
        instance, instance_path, backing_disk_info)

    # make sure that _create_domain_and_network is passed the disk_info
    # returned from blockinfo.get_disk_info and not the one that's
    # returned from _get_instance_disk_info
    mock_create_domain_and_network.assert_called_once_with(self.context,
        dummyxml, instance, network_info, mock.sentinel.disk_info,
        block_device_info=block_device_info,
        reboot=True, vifs_already_plugged=True)
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot_does_not_call_glance_show(self,
        mock_destroy, mock_get_disk_info, mock_get_guest_config,
        mock_get_instance_path, mock_write_to_file,
        mock_get_instance_disk_info, mock_create_images_and_backing,
        mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
        mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
    """For a hard reboot, we shouldn't need an additional call to glance
    to get the image metadata.

    This is important for automatically spinning up instances on a
    host-reboot, since we won't have a user request context that'll allow
    the Glance request to go through. We have to rely on the cached image
    metadata, instead.

    https://bugs.launchpad.net/nova/+bug/1339386
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    network_info = mock.MagicMock()
    block_device_info = mock.MagicMock()
    mock_get_disk_info.return_value = {}
    mock_get_guest_config.return_value = mock.MagicMock()
    mock_get_instance_path.return_value = '/foo'
    mock_looping_call.return_value = mock.MagicMock()
    drvr._image_api = mock.MagicMock()

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # The cached metadata must be used — no Glance round trip.
    self.assertFalse(drvr._image_api.get.called)
    mock_ensure_tree.assert_called_once_with('/foo')
def test_suspend(self):
    """suspend() managed-saves the domain; with no LVM ephemeral key
    (ephemeral_key_uuid=None) no dmcrypt volumes are deleted and the
    disk info is never queried.
    """
    guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
    dom = guest._domain

    instance = objects.Instance(**self.test_instance)
    instance.ephemeral_key_uuid = None

    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Decorated inner function: the patches need `conn`, which only
    # exists at test run time, so they cannot be method decorators.
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch.object(conn, '_get_instance_disk_info', return_value=[])
    @mock.patch.object(conn, '_detach_sriov_ports')
    @mock.patch.object(conn, '_detach_pci_devices')
    @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                       return_value='pci devs')
    @mock.patch.object(conn._host, 'get_guest', return_value=guest)
    def suspend(mock_get_guest, mock_get_instance_pci_devs,
                mock_detach_pci_devices, mock_detach_sriov_ports,
                mock_get_instance_disk_info, mock_delete_volume):
        mock_managedSave = mock.Mock()
        dom.managedSave = mock_managedSave

        conn.suspend(self.context, instance)

        mock_managedSave.assert_called_once_with(0)
        self.assertFalse(mock_get_instance_disk_info.called)
        # return_value is [], so this asserts an empty call list.
        mock_delete_volume.assert_has_calls([mock.call(disk['path'])
            for disk in mock_get_instance_disk_info.return_value], False)

    suspend()
@mock.patch.object(time, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, 'get_domain')
def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                         mock_sleep, seconds_to_shutdown,
                         timeout, retry_interval,
                         shutdown_attempts, succeeds):
    """Shared body for the clean-shutdown tests.

    Builds per-second info() responses: RUNNING until
    seconds_to_shutdown elapses (or the timeout hits), then SHUTDOWN.
    shutdown() is re-issued every retry_interval seconds; the number of
    issued shutdowns and the overall result are asserted.
    """
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    shutdown_count = []

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
    return_shutdowns = [shutdown_count.append("shutdown")]
    retry_countdown = retry_interval
    for x in range(min(seconds_to_shutdown, timeout)):
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        if retry_countdown == 0:
            # Time for another shutdown attempt on this tick.
            return_shutdowns.append(shutdown_count.append("shutdown"))
            retry_countdown = retry_interval
        else:
            retry_countdown -= 1

    if seconds_to_shutdown < timeout:
        return_infos.append(
            (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

    mock_domain.info.side_effect = return_infos
    mock_domain.shutdown.side_effect = return_shutdowns

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_create_domain.side_effect = fake_create_domain
    result = drvr._clean_shutdown(instance, timeout, retry_interval)

    self.assertEqual(succeeds, result)
    self.assertEqual(shutdown_attempts, len(shutdown_count))
def test_clean_shutdown_first_time(self):
    """Guest shuts down before the first retry: one attempt, success."""
    self._test_clean_shutdown(seconds_to_shutdown=2,
                              timeout=5,
                              retry_interval=3,
                              shutdown_attempts=1,
                              succeeds=True)
def test_clean_shutdown_with_retry(self):
    """Guest needs a second shutdown attempt but succeeds in time."""
    self._test_clean_shutdown(seconds_to_shutdown=4,
                              timeout=5,
                              retry_interval=3,
                              shutdown_attempts=2,
                              succeeds=True)
def test_clean_shutdown_failure(self):
    """Guest never shuts down within the timeout: reported failure."""
    self._test_clean_shutdown(seconds_to_shutdown=6,
                              timeout=5,
                              retry_interval=3,
                              shutdown_attempts=2,
                              succeeds=False)
def test_clean_shutdown_no_wait(self):
    """timeout=0: a single attempt is made and failure returned."""
    self._test_clean_shutdown(seconds_to_shutdown=6,
                              timeout=0,
                              retry_interval=3,
                              shutdown_attempts=1,
                              succeeds=False)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_ID,
                            mock_attachDevice):
    """A direct (SR-IOV) vif passed in network_info is attached to the
    domain via attachDeviceFlags.
    """
    instance = objects.Instance(**self.test_instance)

    network_info = _fake_network_info(self, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    drvr._attach_sriov_ports(self.context, instance, guest, network_info)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports_with_info_cache(self,
                                            mock_get_image_metadata,
                                            mock_ID,
                                            mock_attachDevice):
    """With network_info=None, the vifs are taken from the instance's
    info_cache instead.
    """
    instance = objects.Instance(**self.test_instance)

    network_info = _fake_network_info(self, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    drvr._attach_sriov_ports(self.context, instance, guest, None)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
def _test_detach_sriov_ports(self,
                             mock_has_min_version, vif_type):
    """Shared body: an SR-IOV vif of the given vif_type is translated to
    its PCI device and handed to _detach_pci_devices.
    """
    instance = objects.Instance(**self.test_instance)
    expected_pci_slot = "0000:00:00.0"

    # Shape the fake vif so the SR-IOV get_config path (hw_veb style) is
    # taken: the pci_slot in the profile flows through cfg.source_dev
    # into a PciDevice address that _detach_pci_devices receives.
    network_info = _fake_network_info(self, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    network_info[0]['profile'] = dict(pci_slot=expected_pci_slot)
    network_info[0]['type'] = vif_type
    network_info[0]['details'] = dict(vlan="2145")
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)

    # Populate instance.pci_devices so pci_manager.get_instance_pci_devs
    # has something to match the vif against; otherwise the detach list
    # would be empty and the assertion below meaningless.
    matching_pci_dev = (
        objects.PciDevice(address=expected_pci_slot, request_id=None))
    instance.pci_devices = objects.PciDeviceList()
    instance.pci_devices.objects = [matching_pci_dev]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest(FakeVirtDomain())

    with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
        drvr._detach_sriov_ports(self.context, instance, guest)
        mock_detach_pci.assert_called_once_with(
            guest, [matching_pci_dev])
def test_detach_sriov_ports_interface_interface_hostdev(self):
    # Note: test detach_sriov_ports method for vif with config
    # LibvirtConfigGuestInterface
    self._test_detach_sriov_ports(vif_type="hw_veb")
def test_detach_sriov_ports_interface_pci_hostdev(self):
# Note: test detach_sriov_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
self._test_detach_sriov_ports(vif_type="ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
def test_detach_duplicate_mac_sriov_ports(self,
mock_detachDeviceFlags,
mock_has_min_version):
instance = objects.Instance(**self.test_instance)
network_info = _fake_network_info(self, 2)
for network_info_inst in network_info:
network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
network_info_inst['type'] = "hw_veb"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
# pci_manager.get_instance_pci_devs will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
objects.PciDevice(address='0000:00:00.0', request_id=None),
objects.PciDevice(address='0000:00:00.1', request_id=None)
]
domain = FakeVirtDomain()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
drvr._detach_sriov_ports(self.context, instance, guest)
expected_xml = [
('<hostdev mode="subsystem" type="pci" managed="yes">\n'
' <source>\n'
' <address bus="0x00" domain="0x0000" \
function="0x0" slot="0x00"/>\n'
' </source>\n'
'</hostdev>\n'),
('<hostdev mode="subsystem" type="pci" managed="yes">\n'
' <source>\n'
' <address bus="0x00" domain="0x0000" \
function="0x1" slot="0x00"/>\n'
' </source>\n'
'</hostdev>\n')
]
mock_detachDeviceFlags.has_calls([
mock.call(expected_xml[0], flags=1),
mock.call(expected_xml[1], flags=1)
])
    def test_resume(self):
        """resume() must rebuild the domain from the previously saved XML
        and re-attach the instance's PCI devices afterwards.
        """
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        block_device_info = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest('fake_dom')
        # NOTE: the names unpacked from test.nested must stay in the same
        # order as the patches above.
        with test.nested(
            mock.patch.object(drvr, '_get_existing_domain_xml',
                              return_value=dummyxml),
            mock.patch.object(drvr, '_create_domain_and_network',
                              return_value=guest),
            mock.patch.object(drvr, '_attach_pci_devices'),
            mock.patch.object(pci_manager, 'get_instance_pci_devs',
                              return_value='fake_pci_devs'),
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(blockinfo, 'get_disk_info'),
        ) as (_get_existing_domain_xml, _create_domain_and_network,
              _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
              get_disk_info):
            get_image_metadata.return_value = {'bar': 234}
            disk_info = {'foo': 123}
            get_disk_info.return_value = disk_info
            drvr.resume(self.context, instance, network_info,
                        block_device_info)
            # The saved XML must be fed back into domain creation with the
            # vifs already plugged, and then the PCI devices re-attached.
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                network_info, block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                self.context, dummyxml,
                instance, network_info, disk_info,
                block_device_info=block_device_info,
                vifs_already_plugged=True)])
            _attach_pci_devices.assert_has_calls([mock.call(guest,
                                                 'fake_pci_devs')])
@mock.patch.object(host.Host, 'get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
mock_get_info, mock_get_domain):
dom_mock = mock.MagicMock()
dom_mock.undefineFlags.return_value = 1
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = dom_mock
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN, id=-1)
mock_delete_instance_files.return_value = None
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
driver = mock_driver.return_value
driver.cleanup_volumes = mock.Mock()
fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._cleanup_rbd(fake_instance)
driver.cleanup_volumes.assert_called_once_with(fake_instance)
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_undefine_flags(self, mock_save):
        """When undefineFlags() fails, destroy() must fall back to the
        plain undefine() call.
        """
        # mox record/replay: the calls below are the exact expected call
        # sequence.  NOTE: the local name shadows the `mock` module.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
        mock.ID().AndReturn(123)
        mock.undefine()
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock
        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
        def fake_delete_instance_files(instance):
            return None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
        """If the binding lacks undefineFlags(), destroy() must remove the
        managed-save image before the plain undefine().
        """
        # mox record/replay: the calls below are the exact expected call
        # sequence.  NOTE: the local name shadows the `mock` module.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock
        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
        def fake_delete_instance_files(instance):
            return None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
        """When neither undefineFlags() nor hasManagedSaveImage() are
        available, destroy() still falls back to plain undefine().
        """
        # mox record/replay: the calls below are the exact expected call
        # sequence.  NOTE: the local name shadows the `mock` module.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()
        self.mox.ReplayAll()
        def fake_get_domain(self, instance):
            return mock
        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_delete_instance_files(instance):
            return None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    def test_destroy_timed_out(self):
        """A VIR_ERR_OPERATION_TIMEOUT from the domain's destroy() must be
        translated into InstancePowerOffFailure.
        """
        # mox record/replay.  NOTE: the local name shadows the `mock` module.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
        self.mox.ReplayAll()
        def fake_get_domain(self, instance):
            return mock
        def fake_get_error_code(self):
            return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstancePowerOffFailure,
                          drvr.destroy, self.context, instance, [])
    def test_private_destroy_not_found(self):
        """_destroy() must tolerate VIR_ERR_NO_DOMAIN if the domain
        vanishes while it is being destroyed.
        """
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "No such domain",
                error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        # mox record/replay.  NOTE: the local name shadows the `mock` module.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(ex)
        mock.info().AndRaise(ex)
        mock.UUIDString()
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        instance = objects.Instance(**self.test_instance)
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        drvr._destroy(instance)
def test_private_destroy_lxc_processes_refused_to_die(self):
self.flags(virt_type='lxc', group='libvirt')
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "",
error_message="internal error: Some processes refused to die",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
mock.patch.object(conn, 'get_info') as mock_get_info:
mock_domain = mock.MagicMock()
mock_domain.ID.return_value = 1
mock_get_domain.return_value = mock_domain
mock_domain.destroy.side_effect = ex
mock_info = mock.MagicMock()
mock_info.id = 1
mock_info.state = power_state.SHUTDOWN
mock_get_info.return_value = mock_info
instance = objects.Instance(**self.test_instance)
conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "",
error_message="internal error: Some processes refused to die",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
mock_domain = mock.MagicMock()
mock_domain.ID.return_value = 1
mock_get_domain.return_value = mock_domain
mock_domain.destroy.side_effect = ex
instance = objects.Instance(**self.test_instance)
self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
instance)
def test_private_destroy_ebusy_timeout(self):
# Tests that _destroy will retry 3 times to destroy the guest when an
# EBUSY is raised, but eventually times out and raises the libvirtError
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
("Failed to terminate process 26425 with SIGKILL: "
"Device or resource busy"),
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
int1=errno.EBUSY)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.poweroff = mock.Mock(side_effect=ex)
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
instance)
self.assertEqual(3, mock_guest.poweroff.call_count)
def test_private_destroy_ebusy_multiple_attempt_ok(self):
# Tests that the _destroy attempt loop is broken when EBUSY is no
# longer raised.
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
("Failed to terminate process 26425 with SIGKILL: "
"Device or resource busy"),
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
int1=errno.EBUSY)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.poweroff = mock.Mock(side_effect=[ex, None])
inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr._host, 'get_guest',
return_value=mock_guest):
with mock.patch.object(drvr, 'get_info', return_value=inst_info):
drvr._destroy(instance)
self.assertEqual(2, mock_guest.poweroff.call_count)
    def test_undefine_domain_with_not_found_instance(self):
        """_undefine_domain() must tolerate an already-deleted instance."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # NOTE(wenjianhn): verifies undefine doesn't raise if the
        # instance disappears
        drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total(self, mock_get, mock_bdms,
mock_list):
# Ensure destroy calls managedSaveRemove for saved instance.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = str(uuid.uuid4())
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return self._uuid
def XMLDesc(self, flags):
return "<domain/>"
instance_domains = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002")]
mock_list.return_value = instance_domains
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '0'}]}
def get_info(instance_name, xml, **kwargs):
return fake_disks.get(instance_name)
instance_uuids = [dom.UUIDString() for dom in instance_domains]
instances = [objects.Instance(
uuid=instance_uuids[0],
root_device_name='/dev/vda'),
objects.Instance(
uuid=instance_uuids[1],
root_device_name='/dev/vdb')
]
mock_get.return_value = instances
with mock.patch.object(drvr,
"_get_instance_disk_info") as mock_info:
mock_info.side_effect = get_info
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(result, 10653532160)
mock_list.assert_called_once_with()
self.assertEqual(2, mock_info.call_count)
filters = {'uuid': instance_uuids}
mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains")
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms,
mock_list):
# Ensure destroy calls managedSaveRemove for saved instance.
class DiagFakeDomain(object):
def __init__(self, name):
self._name = name
self._uuid = str(uuid.uuid4())
def ID(self):
return 1
def name(self):
return self._name
def UUIDString(self):
return self._uuid
def XMLDesc(self, flags):
return "<domain/>"
instance_domains = [
DiagFakeDomain("instance0000001"),
DiagFakeDomain("instance0000002")]
mock_list.return_value = instance_domains
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
fake_disks = {'instance0000001':
[{'type': 'qcow2', 'path': '/somepath/disk1',
'virt_disk_size': '10737418240',
'backing_file': '/somepath/disk1',
'disk_size': '83886080',
'over_committed_disk_size': '10653532160'}],
'instance0000002':
[{'type': 'raw', 'path': '/somepath/disk2',
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
'over_committed_disk_size': '21474836480'}]}
def side_effect(name, dom, block_device_info):
if name == 'instance0000001':
self.assertEqual('/dev/vda',
block_device_info['root_device_name'])
raise OSError(errno.EACCES, 'Permission denied')
if name == 'instance0000002':
self.assertEqual('/dev/vdb',
block_device_info['root_device_name'])
return fake_disks.get(name)
get_disk_info = mock.Mock()
get_disk_info.side_effect = side_effect
drvr._get_instance_disk_info = get_disk_info
instance_uuids = [dom.UUIDString() for dom in instance_domains]
instances = [objects.Instance(
uuid=instance_uuids[0],
root_device_name='/dev/vda'),
objects.Instance(
uuid=instance_uuids[1],
root_device_name='/dev/vdb')
]
mock_get.return_value = instances
result = drvr._get_disk_over_committed_size_total()
self.assertEqual(21474836480, result)
mock_list.assert_called_once_with()
self.assertEqual(2, get_disk_info.call_count)
filters = {'uuid': instance_uuids}
mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@mock.patch.object(host.Host, "list_instance_domains",
return_value=[mock.MagicMock(name='foo')])
@mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
side_effect=exception.VolumeBDMPathNotFound(path='bar'))
@mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
@mock.patch.object(objects.InstanceList, "get_by_filters")
def test_disk_over_committed_size_total_bdm_not_found(self,
mock_get,
mock_bdms,
mock_get_disk_info,
mock_list_domains):
# Tests that we handle VolumeBDMPathNotFound gracefully.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_disk_over_committed_size_total())
def test_cpu_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigCPU()
cpu.model = "Opteron_G4"
cpu.vendor = "AMD"
cpu.arch = arch.X86_64
cpu.cells = 1
cpu.cores = 2
cpu.threads = 1
cpu.sockets = 4
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = arch.X86_64
guest.domtype = ["kvm"]
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = vm_mode.HVM
guest.arch = arch.I686
guest.domtype = ["kvm"]
caps.guests.append(guest)
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
want = {"vendor": "AMD",
"features": set(["extapic", "3dnow"]),
"model": "Opteron_G4",
"arch": arch.X86_64,
"topology": {"cells": 1, "cores": 2, "threads": 1,
"sockets": 4}}
got = drvr._get_cpu_info()
self.assertEqual(want, got)
    def test_get_pcidev_info(self):
        """_get_pcidev_info() must classify node devices as SR-IOV PF,
        SR-IOV VF or standard, and report address/ids/label/NUMA node.
        """
        def fake_nodeDeviceLookupByName(self, name):
            return FakeNodeDevice(_fake_NodeDevXml[name])
        # NOTE: StubOutWithMock only registers the attribute for cleanup;
        # the stub itself is installed by direct assignment below.
        self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
        host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Libvirt older than MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION:
        # PF/VF classification comes from the device XML alone.
        with mock.patch.object(
            fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
            mock_lib_version.return_value = (
                versionutils.convert_version_to_int(
                    libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION) - 1)

            actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
            expect_vf = {
                "dev_id": "pci_0000_04_00_3",
                "address": "0000:04:00.3",
                "product_id": '1521',
                "numa_node": None,
                "vendor_id": '8086',
                "label": 'label_8086_1521',
                "dev_type": fields.PciDeviceType.SRIOV_PF,
                }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
            expect_vf = {
                "dev_id": "pci_0000_04_10_7",
                "address": "0000:04:10.7",
                "product_id": '1520',
                "numa_node": None,
                "vendor_id": '8086',
                "label": 'label_8086_1520',
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "parent_addr": '0000:04:00.3',
                }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
            expect_vf = {
                "dev_id": "pci_0000_04_11_7",
                "address": "0000:04:11.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": 0,
                "label": 'label_8086_1520',
                "dev_type": fields.PciDeviceType.SRIOV_VF,
                "parent_addr": '0000:04:00.3',
                }

            self.assertEqual(expect_vf, actualvf)

            # Without the capability in the XML, the PF/standard decision
            # falls back to pci_utils.is_physical_function.
            with mock.patch.object(
                pci_utils, 'is_physical_function', return_value=True):
                actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
                expect_vf = {
                    "dev_id": "pci_0000_04_00_1",
                    "address": "0000:04:00.1",
                    "product_id": '1013',
                    "numa_node": 0,
                    "vendor_id": '15b3',
                    "label": 'label_15b3_1013',
                    "dev_type": fields.PciDeviceType.SRIOV_PF,
                    }
                self.assertEqual(expect_vf, actualvf)

            with mock.patch.object(
                pci_utils, 'is_physical_function', return_value=False):
                actualvf = drvr._get_pcidev_info("pci_0000_04_00_1")
                expect_vf = {
                    "dev_id": "pci_0000_04_00_1",
                    "address": "0000:04:00.1",
                    "product_id": '1013',
                    "numa_node": 0,
                    "vendor_id": '15b3',
                    "label": 'label_15b3_1013',
                    "dev_type": fields.PciDeviceType.STANDARD,
                    }
                self.assertEqual(expect_vf, actualvf)

        # New enough libvirt: a PF with no VFs is still reported as a PF.
        with mock.patch.object(
            fakelibvirt.Connection, 'getLibVersion') as mock_lib_version:
            mock_lib_version.return_value = (
                versionutils.convert_version_to_int(
                    libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION))
            actualvf = drvr._get_pcidev_info("pci_0000_03_00_0")
            expect_vf = {
                "dev_id": "pci_0000_03_00_0",
                "address": "0000:03:00.0",
                "product_id": '1013',
                "numa_node": 0,
                "vendor_id": '15b3',
                "label": 'label_15b3_1013',
                "dev_type": fields.PciDeviceType.SRIOV_PF,
                }
            self.assertEqual(expect_vf, actualvf)

            actualvf = drvr._get_pcidev_info("pci_0000_03_00_1")
            expect_vf = {
                "dev_id": "pci_0000_03_00_1",
                "address": "0000:03:00.1",
                "product_id": '1013',
                "numa_node": 0,
                "vendor_id": '15b3',
                "label": 'label_15b3_1013',
                "dev_type": fields.PciDeviceType.SRIOV_PF,
                }
            self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
# Handle just the NO_SUPPORT error
not_supported_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'this function is not supported by the connection driver:'
' virNodeNumOfDevices',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
with mock.patch.object(drvr._conn, 'listDevices',
side_effect=not_supported_exc):
self.assertEqual('[]', drvr._get_pci_passthrough_devices())
# We cache not supported status to avoid emitting too many logging
# messages. Clear this value to test the other exception case.
del drvr._list_devices_supported
# Other errors should not be caught
other_exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'other exc',
error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
with mock.patch.object(drvr._conn, 'listDevices',
side_effect=other_exc):
self.assertRaises(fakelibvirt.libvirtError,
drvr._get_pci_passthrough_devices)
def test_get_pci_passthrough_devices(self):
def fakelistDevices(caps, fakeargs=0):
return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7']
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
def fake_nodeDeviceLookupByName(self, name):
return FakeNodeDevice(_fake_NodeDevXml[name])
self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
actjson = drvr._get_pci_passthrough_devices()
expectvfs = [
{
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None,
"numa_node": None},
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
"address": "0000:04:10.7",
"product_id": '1520',
"vendor_id": '8086',
"numa_node": None,
"dev_type": fields.PciDeviceType.SRIOV_VF,
"phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
{
"dev_id": "pci_0000_04_11_7",
"domain": 0,
"address": "0000:04:11.7",
"product_id": '1520',
"vendor_id": '8086',
"numa_node": 0,
"dev_type": fields.PciDeviceType.SRIOV_VF,
"phys_function": [('0x0000', '0x04', '0x00', '0x3')],
}
]
actualvfs = jsonutils.loads(actjson)
for dev in range(len(actualvfs)):
for key in actualvfs[dev].keys():
if key not in ['phys_function', 'virt_functions', 'label']:
self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
def _fake_caps_numa_topology(self,
cells_per_host=4,
sockets_per_cell=1,
cores_per_socket=1,
threads_per_core=2,
kb_mem=1048576):
# Generate mempages list per cell
cell_mempages = list()
for cellid in range(cells_per_host):
mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_0.size = 4
mempages_0.total = 1024 * cellid
mempages_1 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_1.size = 2048
mempages_1.total = 0 + cellid
cell_mempages.append([mempages_0, mempages_1])
topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host,
sockets_per_cell,
cores_per_socket,
threads_per_core,
kb_mem=kb_mem,
numa_mempages_list=cell_mempages)
return topology
    def _test_get_host_numa_topology(self, mempages):
        """Helper asserting _get_host_numa_topology() against the fake
        4-cell topology built by _fake_caps_numa_topology().

        Each cell's usable cpus are the intersection of that cell's cpus
        with the vcpu_pin_set and the online cpus patched in below;
        `mempages` says whether hugepage data is expected per cell.
        """
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = arch.X86_64
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        expected_topo_dict = {'cells': [
                                {'cpus': '0,1', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 0},
                                {'cpus': '3', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 1},
                                {'cpus': '', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 2},
                                {'cpus': '', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 3}]}
        with test.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([0, 1, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set([0, 1, 2, 3, 6])),
                ):
            got_topo = drvr._get_host_numa_topology()
            got_topo_dict = got_topo._to_dict()
            self.assertThat(
                    expected_topo_dict, matchers.DictMatches(got_topo_dict))
            if mempages:
                # cells 0
                self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[1].total)
                # cells 1
                self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
                self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
                self.assertEqual(1, got_topo.cells[1].mempages[1].total)
            else:
                # Old libvirt: no hugepage information at all.
                self.assertEqual([], got_topo.cells[0].mempages)
                self.assertEqual([], got_topo.cells[1].mempages)
            self.assertEqual(expected_topo_dict, got_topo_dict)
            # Nothing is pinned on a fresh host.
            self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
            # Thread siblings survive only where both cpus are usable.
            self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
            self.assertEqual([], got_topo.cells[1].siblings)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
self._test_get_host_numa_topology(mempages=True)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
mock_version, mock_type):
self.flags(virt_type='kvm', group='libvirt')
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
mock_type.return_value = host.HV_DRIVER_QEMU
self._test_get_host_numa_topology(mempages=False)
def test_get_host_numa_topology_empty(self):
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = arch.X86_64
caps.host.topology = None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(host.Host, 'has_min_version', return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps)
) as (has_min_version, get_caps):
self.assertIsNone(drvr._get_host_numa_topology())
self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_old_version(self, mock_lib_version,
mock_version, mock_type):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
mock_type.return_value = host.HV_DRIVER_QEMU
self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
mock_version, mock_type):
self.flags(virt_type='xen', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_lib_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
mock_version.return_value = versionutils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
mock_type.return_value = host.HV_DRIVER_XEN
self.assertIsNone(drvr._get_host_numa_topology())
    def test_diagnostic_vcpus_exception(self):
        """When vcpus() raises, both diagnostics APIs must omit CPU data
        but still report disk, nic and memory statistics.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        # Domain stub with canned stats; only vcpus() fails.
        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                raise fakelibvirt.libvirtError('vcpus missing')

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Legacy flat-dict API: no cpuN_time keys at all.
        expect = {'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Versioned diagnostics API: uptime is diags_time - launched_at.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_blockstats_exception(self):
        """When blockStats() raises, both diagnostics APIs must omit disk
        data but still report cpu, nic and memory statistics.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        # Domain stub with canned stats; only blockStats() fails.
        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                raise fakelibvirt.libvirtError('blockStats missing')

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Legacy flat-dict API: no vdX_* keys at all.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Versioned diagnostics API: uptime is diags_time - launched_at.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_interfacestats_exception(self):
        """NIC stats are omitted from both diagnostics APIs when the
        domain's interfaceStats() call raises libvirtError; CPU, disk and
        memory data are still reported.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                # Simulate libvirt failing to provide NIC statistics.
                raise fakelibvirt.libvirtError('interfaceStat missing')

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Legacy diagnostics: no vnet0_* keys because interfaceStats failed.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10s after launch so the versioned diagnostics
        # report a deterministic uptime.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        # Versioned diagnostics: nic_details is empty for the same reason.
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_memorystats_exception(self):
        """Detailed memory stats are omitted when the domain's
        memoryStats() call raises libvirtError; the max-memory figure
        (from maxMemory()) plus CPU, disk and NIC data are still
        reported.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                # Simulate libvirt failing to provide memory statistics.
                raise fakelibvirt.libvirtError('memoryStats missing')

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Legacy diagnostics: 'memory' (maxMemory) is present but the
        # memory-actual/memory-rss keys are missing.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10s after launch so the versioned diagnostics
        # report a deterministic uptime.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_full(self):
        """Happy path: all libvirt stats calls succeed and both the
        legacy and the versioned diagnostics contain the full CPU, disk,
        memory and NIC data.
        """
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """

        class DiagFakeDomain(FakeVirtDomain):

            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)

            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])

            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)

            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)

            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}

            def maxMemory(self):
                return 280160

        def fake_get_domain(self, instance):
            return DiagFakeDomain()

        self.stubs.Set(host.Host, "get_domain", fake_get_domain)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)

        # Freeze time 10s after launch so the versioned diagnostics
        # report a deterministic uptime.
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.useFixture(utils_fixture.TimeFixture(diags_time))
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
@mock.patch.object(host.Host, 'get_domain')
def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain):
xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
<interface type="bridge">
<mac address="53:55:00:a5:39:39"/>
<model type="virtio"/>
<target dev="br0"/>
</interface>
</devices>
</domain>
"""
class DiagFakeDomain(FakeVirtDomain):
def __init__(self):
super(DiagFakeDomain, self).__init__(fake_xml=xml)
def vcpus(self):
return ([(0, 1, 15340000000, 0),
(1, 1, 1640000000, 0),
(2, 1, 3040000000, 0),
(3, 1, 1420000000, 0)],
[(True, False),
(True, False),
(True, False),
(True, False)])
def blockStats(self, path):
return (169, 688640, 0, 0, -1)
def interfaceStats(self, path):
return (4408, 82, 0, 0, 0, 0, 0, 0)
def memoryStats(self):
return {'actual': 220160, 'rss': 200164}
def maxMemory(self):
return 280160
def fake_get_domain(self):
return DiagFakeDomain()
mock_get_domain.side_effect = fake_get_domain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
actual = drvr.get_diagnostics(instance)
expect = {'cpu0_time': 15340000000,
'cpu1_time': 1640000000,
'cpu2_time': 3040000000,
'cpu3_time': 1420000000,
'vda_read': 688640,
'vda_read_req': 169,
'vda_write': 0,
'vda_write_req': 0,
'vda_errors': -1,
'vdb_read': 688640,
'vdb_read_req': 169,
'vdb_write': 0,
'vdb_write_req': 0,
'vdb_errors': -1,
'memory': 280160,
'memory-actual': 220160,
'memory-rss': 200164,
'vnet0_rx': 4408,
'vnet0_rx_drop': 0,
'vnet0_rx_errors': 0,
'vnet0_rx_packets': 82,
'vnet0_tx': 0,
'vnet0_tx_drop': 0,
'vnet0_tx_errors': 0,
'vnet0_tx_packets': 0,
'br0_rx': 4408,
'br0_rx_drop': 0,
'br0_rx_errors': 0,
'br0_rx_packets': 82,
'br0_tx': 0,
'br0_tx_drop': 0,
'br0_tx_errors': 0,
'br0_tx_packets': 0,
}
self.assertEqual(actual, expect)
lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
self.useFixture(utils_fixture.TimeFixture(diags_time))
instance.launched_at = lt
actual = drvr.get_instance_diagnostics(instance)
expected = {'config_drive': False,
'cpu_details': [{'time': 15340000000},
{'time': 1640000000},
{'time': 3040000000},
{'time': 1420000000}],
'disk_details': [{'errors_count': 0,
'id': '',
'read_bytes': 688640,
'read_requests': 169,
'write_bytes': 0,
'write_requests': 0},
{'errors_count': 0,
'id': '',
'read_bytes': 688640,
'read_requests': 169,
'write_bytes': 0,
'write_requests': 0}],
'driver': 'libvirt',
'hypervisor_os': 'linux',
'memory_details': {'maximum': 2048, 'used': 1234},
'nic_details': [{'mac_address': '52:54:00:a4:38:38',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 4408,
'rx_packets': 82,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0},
{'mac_address': '53:55:00:a5:39:39',
'rx_drop': 0,
'rx_errors': 0,
'rx_octets': 4408,
'rx_packets': 82,
'tx_drop': 0,
'tx_errors': 0,
'tx_octets': 0,
'tx_packets': 0}],
'state': 'running',
'uptime': 10.,
'version': '1.0'}
self.assertEqual(expected, actual.serialize())
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
"""Domain can fail to return the vcpu description in case it's
just starting up or shutting down. Make sure None is handled
gracefully.
"""
class DiagFakeDomain(object):
def __init__(self, vcpus):
self._vcpus = vcpus
def vcpus(self):
if self._vcpus is None:
raise fakelibvirt.libvirtError("fake-error")
else:
return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)
def ID(self):
return 1
def name(self):
return "instance000001"
def UUIDString(self):
return "19479fee-07a5-49bb-9138-d3738280d63c"
mock_list.return_value = [
DiagFakeDomain(None), DiagFakeDomain(5)]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(5, drvr._get_vcpu_used())
mock_list.assert_called_with(only_guests=True, only_running=True)
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count_none(self, mock_list):
"""Domain will return zero if the current number of vcpus used
is None. This is in case of VM state starting up or shutting
down. None type returned is counted as zero.
"""
class DiagFakeDomain(object):
def __init__(self):
pass
def vcpus(self):
return None
def ID(self):
return 1
def name(self):
return "instance000001"
mock_list.return_value = [DiagFakeDomain()]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(0, drvr._get_vcpu_used())
mock_list.assert_called_with(only_guests=True, only_running=True)
def test_get_instance_capabilities(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
def get_host_capabilities_stub(self):
caps = vconfig.LibvirtConfigCaps()
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = arch.X86_64
guest.domtype = ['kvm', 'qemu']
caps.guests.append(guest)
guest = vconfig.LibvirtConfigGuest()
guest.ostype = 'hvm'
guest.arch = arch.I686
guest.domtype = ['kvm']
caps.guests.append(guest)
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
want = [(arch.X86_64, 'kvm', 'hvm'),
(arch.X86_64, 'qemu', 'hvm'),
(arch.I686, 'kvm', 'hvm')]
got = drvr._get_instance_capabilities()
self.assertEqual(want, got)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertIsNone(fake_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        """Helper: drive _is_storage_shared_with() with os.path.exists
        patched to *is_same* and return its result.

        Shared storage is detected by having the remote host create a
        temp file under the path and checking whether it shows up
        locally.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with test.nested(
            mock.patch.object(drvr._remotefs, 'create_file'),
            mock.patch.object(drvr._remotefs, 'remove_file')
        ) as (mock_rem_fs_create, mock_rem_fs_remove):
            result = drvr._is_storage_shared_with('host', '/path')
            mock_rem_fs_create.assert_any_call('host', mock.ANY)
            create_args, create_kwargs = mock_rem_fs_create.call_args
            self.assertTrue(create_args[1].startswith('/path'))
            if is_same:
                # Same storage: the probe file appeared locally and is
                # removed with a local os.unlink().
                mock_unlink.assert_called_once_with(mock.ANY)
            else:
                # Different storage: cleanup happens on the remote host.
                mock_rem_fs_remove.assert_called_with('host', mock.ANY)
                remove_args, remove_kwargs = mock_rem_fs_remove.call_args
                self.assertTrue(remove_args[1].startswith('/path'))
        return result
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
    def test_shared_storage_detection_easy(self):
        """When the remote host resolves to our own address, storage is
        shared by definition and no file round-trip is attempted.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
        # execute/exists/unlink are stubbed without any expectation:
        # calling them would fail mox verification.
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        drvr.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
def test_store_pid_remove_pid(self):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
popen = mock.Mock(pid=3)
drvr.job_tracker.add_job(instance, popen.pid)
self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
drvr.job_tracker.remove_job(instance, popen.pid)
self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
instance = objects.Instance(**self.test_instance)
dom_mock = mock.MagicMock()
dom_mock.info.return_value = [
1, 2048, 737, 8, 12345, 888888
]
dom_mock.ID.return_value = mock.sentinel.instance_id
mock_get_domain.return_value = dom_mock
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
info = drvr.get_info(instance)
self.assertEqual(1, info.state)
self.assertEqual(2048, info.max_mem_kb)
self.assertEqual(737, info.mem_kb)
self.assertEqual(8, info.num_cpu)
self.assertEqual(12345, info.cpu_time_ns)
self.assertEqual(mock.sentinel.instance_id, info.id)
dom_mock.info.assert_called_once_with()
dom_mock.ID.assert_called_once_with()
mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
mock_domain = mock.MagicMock()
guest = drvr._create_domain(domain=mock_domain)
self.assertEqual(mock_domain, guest._domain)
mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                           mock_setup_container, mock_get_info, mock_clean):
        """For a running LXC guest, _create_domain_and_network sets up the
        container rootfs, records the rootfs device in system_metadata and
        detaches the host-side namespace again afterwards.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # A RUNNING guest means the container came up, so the namespace
        # is cleaned (not torn down).
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree, mock_setup_container,
                                       mock_chown, mock_get_info, mock_clean):
        """When LXC uid/gid maps are configured, the container rootfs is
        chowned with the corresponding UID/GID map config objects.
        """
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            # Validate that the flag strings "0:1000:100" were parsed into
            # one UID map and one GID map config object.
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
        ) as (
            mock_create_images_and_backing, mock_is_booted_from_volume,
            mock_create_domain, mock_plug_vifs, mock_setup_basic_filtering,
            mock_prepare_instance_filter, mock_apply_instance_filter
        ):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        """If the LXC guest fails to reach RUNNING, the container is torn
        down instead of merely cleaning the host-side namespace.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # SHUTDOWN state triggers the teardown path below.
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)

        with test.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # The exact XML must reach libvirt before the failure.
            self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

        def fake_safe_decode(text, *args, **kwargs):
            return text + 'safe decoded'

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            # The error log must contain the (safe-decoded) XML.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
            self.assertIn('safe decoded', msg % args)

        self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error propagates, but only after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)
    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            # The error log must contain the domain's XML.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error propagates, but only after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)
    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_execute(*args, **kwargs):
            # Writing to hairpin_mode shells out; make that fail.
            raise processutils.ProcessExecutionError('error')

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            # The error log must contain the domain's XML.
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)

        # power_on=False skips createWithFlags and goes straight to the
        # hairpin setup, which then fails.
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
    def test_get_vnc_console(self):
        """get_vnc_console returns the port parsed from the domain's
        <graphics type='vnc'> element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = drvr.get_vnc_console(self.context, instance)
        self.assertEqual(vnc_dict.port, '5900')
    def test_get_vnc_console_unavailable(self):
        """ConsoleTypeUnavailable is raised when the domain XML has no
        VNC <graphics> element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_vnc_console, self.context, instance)
    def test_get_spice_console(self):
        """get_spice_console returns the port parsed from the domain's
        <graphics type='spice'> element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = drvr.get_spice_console(self.context, instance)
        self.assertEqual(spice_dict.port, '5950')
    def test_get_spice_console_unavailable(self):
        """ConsoleTypeUnavailable is raised when the domain XML has no
        SPICE <graphics> element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_spice_console, self.context, instance)
def test_detach_volume_with_instance_not_found(self):
    # Test that detach_volume() method does not raise exception,
    # if the instance does not exist.
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    with test.nested(
        mock.patch.object(host.Host, 'get_domain',
                          side_effect=exception.InstanceNotFound(
                              instance_id=instance.uuid)),
        mock.patch.object(drvr, '_disconnect_volume')
    ) as (_get_domain, _disconnect_volume):
        connection_info = {'driver_volume_type': 'fake'}
        drvr.detach_volume(connection_info, instance, '/dev/sda')
        _get_domain.assert_called_once_with(instance)
        # The volume must still be disconnected on the host even though
        # the guest is already gone.
        _disconnect_volume.assert_called_once_with(connection_info,
                                                   'sda')
def _test_attach_detach_interface_get_config(self, method_name):
    """Tests that the get_config() method is properly called in
    attach_interface() and detach_interface().

    :param method_name: either "attach_interface" or "detach_interface"
                        depending on the method to test.
    """
    self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self, 1)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    fake_image_meta = objects.ImageMeta.from_dict(
        {'id': instance['image_ref']})

    if method_name == "attach_interface":
        self.mox.StubOutWithMock(drvr.firewall_driver,
                                 'setup_basic_filtering')
        drvr.firewall_driver.setup_basic_filtering(instance, network_info)

    # Compute the real config first so the recorded mox expectation on
    # get_config() can hand it back during replay.
    expected = drvr.vif_driver.get_config(instance, network_info[0],
                                          fake_image_meta,
                                          instance.get_flavor(),
                                          CONF.libvirt.virt_type,
                                          drvr._host)
    self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
    drvr.vif_driver.get_config(instance, network_info[0],
                               mox.IsA(objects.ImageMeta),
                               mox.IsA(objects.Flavor),
                               CONF.libvirt.virt_type,
                               drvr._host).\
                               AndReturn(expected)

    self.mox.ReplayAll()

    if method_name == "attach_interface":
        drvr.attach_interface(instance, fake_image_meta,
                              network_info[0])
    elif method_name == "detach_interface":
        drvr.detach_interface(instance, network_info[0])
    else:
        raise ValueError("Unhandled method %s" % method_name)
@mock.patch.object(lockutils, "external_lock")
def test_attach_interface_get_config(self, mock_lock):
    """Tests that the get_config() method is properly called in
    attach_interface().
    """
    # Replace the external lock with a plain semaphore so the test does
    # not touch the filesystem.
    mock_lock.return_value = threading.Semaphore()

    self._test_attach_detach_interface_get_config("attach_interface")

def test_detach_interface_get_config(self):
    """Tests that the get_config() method is properly called in
    detach_interface().
    """
    self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
    """default_root_device_name queries blockinfo for the disk and
    cdrom buses and prefixes the resulting root device with /dev/.
    """
    instance = {'uuid': 'fake_instance'}
    image_meta = objects.ImageMeta.from_dict({'id': 'fake'})
    # NOTE: key fixed from the misspelled 'detination_type'; the dict is
    # only passed through verbatim to the mocked get_root_info(), so the
    # fix does not alter what the test exercises.
    root_bdm = {'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake_id'}
    self.flags(virt_type='fake_libvirt_type', group='libvirt')

    self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
    self.mox.StubOutWithMock(blockinfo, 'get_root_info')

    # Bus lookups may happen in either order.
    blockinfo.get_disk_bus_for_device_type(instance,
                                           'fake_libvirt_type',
                                           image_meta,
                                           'disk').InAnyOrder().\
                                            AndReturn('virtio')
    blockinfo.get_disk_bus_for_device_type(instance,
                                           'fake_libvirt_type',
                                           image_meta,
                                           'cdrom').InAnyOrder().\
                                            AndReturn('ide')
    blockinfo.get_root_info(instance, 'fake_libvirt_type',
                            image_meta, root_bdm,
                            'virtio', 'ide').AndReturn({'dev': 'vda'})
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(drvr.default_root_device_name(instance, image_meta,
                                                   root_bdm), '/dev/vda')
@mock.patch.object(objects.BlockDeviceMapping, "save")
def test_default_device_names_for_instance(self, save_mock):
    """default_device_names_for_instance keeps names that are already
    valid, reassigns conflicting ones (honouring the disk bus) and
    saves every block device mapping.
    """
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    ephemerals = [objects.BlockDeviceMapping(
        **fake_block_device.AnonFakeDbBlockDeviceDict(
            {'device_name': 'vdb',
             'source_type': 'blank',
             'volume_size': 2,
             'destination_type': 'local'}))]
    swap = [objects.BlockDeviceMapping(
        **fake_block_device.AnonFakeDbBlockDeviceDict(
            {'device_name': 'vdg',
             'source_type': 'blank',
             'volume_size': 512,
             'guest_format': 'swap',
             'destination_type': 'local'}))]
    block_device_mapping = [
        objects.BlockDeviceMapping(
            **fake_block_device.AnonFakeDbBlockDeviceDict(
                {'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-image-id',
                 'device_name': '/dev/vdxx',
                 'disk_bus': 'scsi'}))]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.default_device_names_for_instance(instance,
                                           instance.root_device_name,
                                           ephemerals, swap,
                                           block_device_mapping)

    # Ephemeral device name was correct so no changes
    self.assertEqual('/dev/vdb', ephemerals[0].device_name)

    # Swap device name was incorrect so it was changed
    self.assertEqual('/dev/vdc', swap[0].device_name)

    # Volume device name was changed too, taking the bus into account
    self.assertEqual('/dev/sda', block_device_mapping[0].device_name)

    self.assertEqual(3, save_mock.call_count)
def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
    """Helper: assert get_device_name_for_instance returns
    expected_dev for new_bdm on an instance rooted at /dev/vda with no
    ephemeral disks.
    """
    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    instance.ephemeral_gb = 0
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    got_dev = drvr.get_device_name_for_instance(
        instance, [], new_bdm)
    self.assertEqual(expected_dev, got_dev)
def test_get_device_name_for_instance_simple(self):
    """No hints at all: next free virtio name after the root disk."""
    new_bdm = objects.BlockDeviceMapping(
        context=context,
        source_type='volume', destination_type='volume',
        boot_index=-1, volume_id='fake-id',
        device_name=None, guest_format=None,
        disk_bus=None, device_type=None)
    self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

def test_get_device_name_for_instance_suggested(self):
    """A suggested device_name is ignored; allocation is sequential."""
    new_bdm = objects.BlockDeviceMapping(
        context=context,
        source_type='volume', destination_type='volume',
        boot_index=-1, volume_id='fake-id',
        device_name='/dev/vdg', guest_format=None,
        disk_bus=None, device_type=None)
    self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

def test_get_device_name_for_instance_bus(self):
    """An explicit scsi disk_bus yields an sd* device name."""
    new_bdm = objects.BlockDeviceMapping(
        context=context,
        source_type='volume', destination_type='volume',
        boot_index=-1, volume_id='fake-id',
        device_name=None, guest_format=None,
        disk_bus='scsi', device_type=None)
    self._test_get_device_name_for_instance(new_bdm, '/dev/sda')

def test_get_device_name_for_instance_device_type(self):
    """A floppy device_type yields an fd* device name."""
    new_bdm = objects.BlockDeviceMapping(
        context=context,
        source_type='volume', destination_type='volume',
        boot_index=-1, volume_id='fake-id',
        device_name=None, guest_format=None,
        disk_bus=None, device_type='floppy')
    self._test_get_device_name_for_instance(new_bdm, '/dev/fda')
def test_is_supported_fs_format(self):
    """is_supported_fs_format accepts ext2/3/4 and xfs and rejects
    empty or unknown format strings.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs_format in (disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
                      disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS):
        self.assertTrue(drvr.is_supported_fs_format(fs_format))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fs_format in ('', 'dummy'):
        self.assertFalse(drvr.is_supported_fs_format(fs_format))
def test_post_live_migration_at_destination_with_block_device_info(self):
    """post_live_migration_at_destination regenerates the guest XML
    from the migrated block device info and saves the volume BDMs.
    """
    # Preparing mocks
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    self.resultXML = None

    def fake_getLibVersion():
        return fakelibvirt.FAKE_LIBVIRT_VERSION

    def fake_getCapabilities():
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                    <arch>x86_64</arch>
                    <model>Penryn</model>
                    <vendor>Intel</vendor>
                    <topology sockets='1' cores='2' threads='1'/>
                    <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None, write_to_disk=False):
        # Capture the generated XML so the test can assert on it below.
        if image_meta is None:
            image_meta = objects.ImageMeta.from_dict({})
        conf = drvr._get_guest_config(instance, network_info, image_meta,
                                      disk_info, rescue, block_device_info)
        self.resultXML = conf.to_xml()
        return self.resultXML

    def fake_get_domain(instance):
        return mock_domain

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Westmere</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='aes'/>
                  </cpu>
               """

    network_info = _fake_network_info(self, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  listDefinedDomains=lambda: [],
                                  numOfDomains=lambda: 0,
                                  baselineCPU=fake_baselineCPU)
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)

    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr,
                   '_get_guest_xml',
                   fake_to_xml)
    self.stubs.Set(host.Host,
                   'get_domain',
                   fake_get_domain)
    bdm = objects.BlockDeviceMapping(
        self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 1, 'guest_format': None,
             'boot_index': 0,
             'source_type': 'volume',
             'destination_type': 'volume',
             'device_name': '/dev/vda',
             'disk_bus': 'virtio',
             'device_type': 'disk',
             'delete_on_termination': False}))
    block_device_info = {'block_device_mapping':
                         driver_block_device.convert_volumes([bdm])}
    block_device_info['block_device_mapping'][0]['connection_info'] = (
        {'driver_volume_type': 'iscsi'})
    with test.nested(
        mock.patch.object(
            driver_block_device.DriverVolumeBlockDevice, 'save'),
        mock.patch.object(objects.Instance, 'save')
    ) as (mock_volume_save, mock_instance_save):
        drvr.post_live_migration_at_destination(
            self.context, instance, network_info, True,
            block_device_info=block_device_info)
        self.assertIn('fake', self.resultXML)
        mock_volume_save.assert_called_once_with()
def test_create_propagates_exceptions(self):
    """Errors raised by _create_domain bubble out of
    _create_domain_and_network for LXC instances.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(id=1, uuid='fake-uuid',
                                image_ref='my_fake_image')

    with test.nested(
        mock.patch.object(drvr, '_create_domain_setup_lxc'),
        mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, 'firewall_driver'),
        mock.patch.object(drvr, '_create_domain',
                          side_effect=exception.NovaException),
        mock.patch.object(drvr, 'cleanup')):
        self.assertRaises(exception.NovaException,
                          drvr._create_domain_and_network,
                          self.context,
                          'xml',
                          instance, None, None)
def test_create_without_pause(self):
    """LXC domains are created with pause=0 and never resumed."""
    self.flags(virt_type='lxc', group='libvirt')

    @contextlib.contextmanager
    def fake_lxc_disk_handler(*args, **kwargs):
        yield

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with test.nested(
        mock.patch.object(drvr, '_lxc_disk_handler',
                          side_effect=fake_lxc_disk_handler),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, 'firewall_driver'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'cleanup')) as (
        # NOTE: names now match the order of the patches above; the
        # original bound 'cleanup' to the plug_vifs mock and vice
        # versa ('create' was already correct, so assertions are
        # unchanged).
        _handler, plug_vifs, firewall_driver, create, cleanup):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, None, None)
        self.assertEqual(0, create.call_args_list[0][1]['pause'])
        self.assertEqual(0, domain.resume.call_count)
def _test_create_with_network_events(self, neutron_failure=None,
                                     power_on=True):
    """Drive _create_domain_and_network while simulating neutron
    network-vif-plugged events.

    :param neutron_failure: None, 'timeout' or 'error' to control how
        waiting for the plug event ends.
    :param power_on: whether the instance is powered on (events are
        only awaited when powering on).
    """
    generated_events = []

    def wait_timeout():
        # Simulates Event.wait() for one vif-plugged event.
        event = mock.MagicMock()
        if neutron_failure == 'timeout':
            raise eventlet.timeout.Timeout()
        elif neutron_failure == 'error':
            event.status = 'failed'
        else:
            event.status = 'completed'
        return event

    def fake_prepare(instance, event_name):
        # Record every prepared event so expectations can be checked.
        m = mock.MagicMock()
        m.instance = instance
        m.event_name = event_name
        m.wait.side_effect = wait_timeout
        generated_events.append(m)
        return m

    virtapi = manager.ComputeVirtAPI(mock.MagicMock())
    prepare = virtapi._compute.instance_events.prepare_for_instance_event
    prepare.side_effect = fake_prepare
    drvr = libvirt_driver.LibvirtDriver(virtapi, False)

    instance = objects.Instance(**self.test_instance)
    vifs = [{'id': 'vif1', 'active': False},
            {'id': 'vif2', 'active': False}]

    @mock.patch.object(drvr, 'plug_vifs')
    @mock.patch.object(drvr, 'firewall_driver')
    @mock.patch.object(drvr, '_create_domain')
    @mock.patch.object(drvr, 'cleanup')
    def test_create(cleanup, create, fw_driver, plug_vifs):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, vifs, None,
                                                 power_on=power_on)
        plug_vifs.assert_called_with(instance, vifs)

        pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
        self.assertEqual(pause,
                         create.call_args_list[0][1]['pause'])
        if pause:
            domain.resume.assert_called_once_with()
        if neutron_failure and CONF.vif_plugging_is_fatal:
            cleanup.assert_called_once_with(self.context,
                                            instance, network_info=vifs,
                                            block_device_info=None)

    test_create()

    if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
        prepare.assert_has_calls([
            mock.call(instance, 'network-vif-plugged-vif1'),
            mock.call(instance, 'network-vif-plugged-vif2')])
        for event in generated_events:
            if neutron_failure and generated_events.index(event) != 0:
                self.assertEqual(0, event.call_count)
            elif (neutron_failure == 'error' and
                  not CONF.vif_plugging_is_fatal):
                event.wait.assert_called_once_with()
    else:
        # No neutron / no timeout / powered off: nothing is awaited.
        self.assertEqual(0, prepare.call_count)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron(self, is_neutron):
    # Happy path: events prepared and awaited.
    self._test_create_with_network_events()

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_power_off(self,
                                                      is_neutron):
    # Tests that we don't wait for events if we don't start the instance.
    self._test_create_with_network_events(power_on=False)

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_nowait(self, is_neutron):
    # vif_plugging_timeout=0 disables waiting entirely.
    self.flags(vif_plugging_timeout=0)
    self._test_create_with_network_events()

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_timeout(
        self, is_neutron):
    # Timeout while waiting, but non-fatal: creation proceeds.
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='timeout')

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_timeout(
        self, is_neutron):
    # Timeout while waiting and fatal: creation raises.
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='timeout')

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_error(
        self, is_neutron):
    # Event reports failure, but non-fatal: creation proceeds.
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='error')

@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_error(
        self, is_neutron):
    # Event reports failure and fatal: creation raises.
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='error')

@mock.patch('nova.utils.is_neutron', return_value=False)
def test_create_with_network_events_non_neutron(self, is_neutron):
    # nova-network: no events are prepared or awaited.
    self._test_create_with_network_events()
@mock.patch('nova.volume.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
    """_create_domain_and_network sets up volume encryption, plugs
    VIFs and applies firewall filtering before creating the domain.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_dom = mock.MagicMock()
    mock_encryption_meta = mock.MagicMock()
    get_encryption_metadata.return_value = mock_encryption_meta

    fake_xml = """
        <domain>
            <name>instance-00000001</name>
            <memory>1048576</memory>
            <vcpu>1</vcpu>
            <devices>
                <disk type='file' device='disk'>
                    <driver name='qemu' type='raw' cache='none'/>
                    <source file='/path/fake-volume1'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
            </devices>
        </domain>
    """
    fake_volume_id = "fake-volume-id"
    connection_info = {"driver_volume_type": "fake",
                       "data": {"access_mode": "rw",
                                "volume_id": fake_volume_id}}

    def fake_getitem(*args, **kwargs):
        # Minimal mapping lookup for the mocked BDM object.
        fake_bdm = {'connection_info': connection_info,
                    'mount_device': '/dev/vda'}
        return fake_bdm.get(args[0])

    mock_volume = mock.MagicMock()
    mock_volume.__getitem__.side_effect = fake_getitem
    block_device_info = {'block_device_mapping': [mock_volume]}
    network_info = [network_model.VIF(id='1'),
                    network_model.VIF(id='2', active=True)]

    with test.nested(
        mock.patch.object(drvr, '_get_volume_encryptor'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver,
                          'prepare_instance_filter'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
    ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
          prepare_instance_filter, create_domain, apply_instance_filter):
        create_domain.return_value = libvirt_guest.Guest(mock_dom)

        guest = drvr._create_domain_and_network(
                self.context, fake_xml, instance, network_info, None,
                block_device_info=block_device_info)

        get_encryption_metadata.assert_called_once_with(self.context,
            drvr._volume_api, fake_volume_id, connection_info)
        get_volume_encryptor.assert_called_once_with(connection_info,
                                                     mock_encryption_meta)
        plug_vifs.assert_called_once_with(instance, network_info)
        setup_basic_filtering.assert_called_once_with(instance,
                                                      network_info)
        prepare_instance_filter.assert_called_once_with(instance,
                                                        network_info)
        pause = self._get_pause_flag(drvr, network_info)
        create_domain.assert_called_once_with(
            fake_xml, pause=pause, power_on=True)
        self.assertEqual(mock_dom, guest._domain)
def test_get_guest_storage_config(self):
    """_get_guest_storage_config connects volumes, applies cache modes
    to every device and records the default ephemeral/swap devices on
    the instance.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    test_instance = copy.deepcopy(self.test_instance)
    test_instance["default_swap_device"] = None
    instance = objects.Instance(**test_instance)
    image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
    flavor = instance.get_flavor()
    conn_info = {'driver_volume_type': 'fake', 'data': {}}
    bdm = objects.BlockDeviceMapping(
        self.context,
        **fake_block_device.FakeDbBlockDeviceDict({
            'id': 1,
            'source_type': 'volume',
            'destination_type': 'volume',
            'device_name': '/dev/vdc'}))
    bdi = {'block_device_mapping':
           driver_block_device.convert_volumes([bdm])}
    bdm = bdi['block_device_mapping'][0]
    bdm['connection_info'] = conn_info
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        bdi)
    mock_conf = mock.MagicMock(source_path='fake')

    with test.nested(
        mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                          'save'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_get_volume_config',
                          return_value=mock_conf),
        mock.patch.object(drvr, '_set_cache_mode')
    ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
        devices = drvr._get_guest_storage_config(instance, image_meta,
            disk_info, False, bdi, flavor, "hvm")

        # Root + ephemeral + volume = 3 devices, each with a cache mode.
        self.assertEqual(3, len(devices))
        self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
        self.assertIsNone(instance.default_swap_device)
        connect_volume.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        get_volume_config.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        volume_save.assert_called_once_with()
        self.assertEqual(3, set_cache_mode.call_count)
def test_get_neutron_events(self):
    """Only inactive VIFs generate network-vif-plugged events."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    vifs = [network_model.VIF(id='1'),
            network_model.VIF(id='2', active=True)]
    self.assertEqual([('network-vif-plugged', '1')],
                     drvr._get_neutron_events(vifs))
def test_unplug_vifs_ignores_errors(self):
    """_unplug_vifs with ignore_errors=True swallows vif driver
    failures but still attempts the unplug.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    with mock.patch.object(drvr, 'vif_driver') as vif_driver:
        vif_driver.unplug.side_effect = exception.AgentError(
            method='unplug')
        drvr._unplug_vifs('inst', [1], ignore_errors=True)
        vif_driver.unplug.assert_called_once_with('inst', 1)

def test_unplug_vifs_reports_errors(self):
    """unplug_vifs propagates vif driver failures to the caller."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    with mock.patch.object(drvr, 'vif_driver') as vif_driver:
        vif_driver.unplug.side_effect = exception.AgentError(
            method='unplug')
        self.assertRaises(exception.AgentError,
                          drvr.unplug_vifs, 'inst', [1])
        vif_driver.unplug.assert_called_once_with('inst', 1)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
    """cleanup still disconnects a volume whose BDM carries no
    mount_device.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = mock.Mock()
    drvr._disconnect_volume = mock.Mock()
    fake_inst = {'name': 'foo'}
    fake_bdms = [{'connection_info': 'foo',
                  'mount_device': None}]
    with mock.patch('nova.virt.driver'
                    '.block_device_info_get_mapping',
                    return_value=fake_bdms):
        drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
    self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
    """cleanup calls _unplug_vifs with ignore_errors=True (third
    positional argument).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    fake_inst = {'name': 'foo'}
    with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
        lookup.return_value = fake_inst
        # NOTE(danms): Make unplug cause us to bail early, since
        # we only care about how it was called
        unplug.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
        unplug.assert_called_once_with(fake_inst, 'netinfo', True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                   return_value=True)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_cleanup_migrate_data_shared_block_storage(self,
                                                   _undefine_domain,
                                                   save,
                                                   delete_instance_files,
                                                   unfilter_instance):
    # Tests the cleanup method when migrate_data has
    # is_shared_block_storage=True and destroy_disks=False.
    instance = objects.Instance(self.context, **self.test_instance)
    migrate_data = objects.LibvirtLiveMigrateData(
        is_shared_block_storage=True)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.cleanup(
        self.context, instance, network_info={}, destroy_disks=False,
        migrate_data=migrate_data, destroy_vifs=False)
    # Instance files are removed and the instance is marked cleaned.
    delete_instance_files.assert_called_once_with(instance)
    self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
    self.assertTrue(instance.cleaned)
    save.assert_called_once_with()
def test_swap_volume(self):
    """_swap_volume copies via blockRebase, resizes the device to the
    requested size and redefines the persistent domain.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    mock_dom = mock.MagicMock()
    guest = libvirt_guest.Guest(mock_dom)

    with mock.patch.object(drvr._conn, 'defineXML',
                           create=True) as mock_define:
        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"

        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        # An empty block job info dict means the copy job is finished.
        mock_dom.blockJobInfo.return_value = {}

        drvr._swap_volume(guest, srcfile, dstfile, 1)

        mock_dom.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dstfile, 0, flags=(
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
        # 1 GiB expressed in KiB, as blockResize expects.
        mock_dom.blockResize.assert_called_once_with(
            srcfile, 1 * units.Gi / units.Ki)
        mock_define.assert_called_once_with(xmldoc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_and_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
def test_swap_volume_driver_bdm_save(self, get_guest,
                                     connect_volume, get_volume_config,
                                     get_by_volume_and_instance,
                                     volume_save, swap_volume,
                                     disconnect_volume):
    """swap_volume connects the new volume, swaps the disk located in
    the domain XML, disconnects the old volume and saves the BDM.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    instance = objects.Instance(**self.test_instance)
    old_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'old-volume-id',
                           'data': {'device_path': '/fake-old-volume',
                                    'access_mode': 'rw'}}
    new_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'new-volume-id',
                           'data': {'device_path': '/fake-new-volume',
                                    'access_mode': 'rw'}}
    mock_dom = mock.MagicMock()
    guest = libvirt_guest.Guest(mock_dom)
    # Domain XML contains the old volume attached as vdb.
    mock_dom.XMLDesc.return_value = """<domain>
      <devices>
        <disk type='file'>
            <source file='/fake-old-volume'/>
            <target dev='vdb' bus='virtio'/>
        </disk>
      </devices>
    </domain>
    """
    mock_dom.name.return_value = 'inst'
    mock_dom.UUIDString.return_value = 'uuid'
    get_guest.return_value = guest
    disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
    get_volume_config.return_value = mock.MagicMock(
        source_path='/fake-new-volume')

    bdm = objects.BlockDeviceMapping(self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 2, 'instance_uuid': 'fake-instance',
             'device_name': '/dev/vdb',
             'source_type': 'volume',
             'destination_type': 'volume',
             'volume_id': 'fake-volume-id-2',
             'boot_index': 0}))
    get_by_volume_and_instance.return_value = bdm

    conn.swap_volume(old_connection_info, new_connection_info, instance,
                     '/dev/vdb', 1)

    get_guest.assert_called_once_with(instance)
    connect_volume.assert_called_once_with(new_connection_info, disk_info)

    swap_volume.assert_called_once_with(guest, 'vdb',
                                        '/fake-new-volume', 1)
    disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
    volume_save.assert_called_once_with()
def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False):
    """Exercise _live_snapshot end to end with mocked libvirt utils.

    :param can_quiesce: whether the guest supports filesystem quiesce;
        when False the quiesce call raises InstanceQuiesceNotSupported.
    :param require_quiesce: whether the image demands quiesce via the
        os_require_quiesce property.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    mock_dom = mock.MagicMock()
    test_image_meta = self.test_image_meta.copy()
    if require_quiesce:
        test_image_meta = {'properties': {'os_require_quiesce': 'yes'}}

    with test.nested(
        mock.patch.object(drvr._conn, 'defineXML', create=True),
        mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
        mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
        mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
        mock.patch.object(fake_libvirt_utils, 'chown'),
        mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
        mock.patch.object(drvr, '_set_quiesced')
    ) as (mock_define, mock_size, mock_backing, mock_create_cow,
          mock_chown, mock_snapshot, mock_quiesce):

        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"
        bckfile = "/other/path"
        # The snapshot is extracted from a .delta overlay of the dest.
        dltfile = dstfile + ".delta"

        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        mock_size.return_value = 1004009
        mock_backing.return_value = bckfile
        guest = libvirt_guest.Guest(mock_dom)

        if not can_quiesce:
            mock_quiesce.side_effect = (
                exception.InstanceQuiesceNotSupported(
                    instance_id=self.test_instance['id'], reason='test'))

        image_meta = objects.ImageMeta.from_dict(test_image_meta)
        drvr._live_snapshot(self.context, self.test_instance, guest,
                            srcfile, dstfile, "qcow2", "qcow2", image_meta)

        mock_dom.XMLDesc.assert_called_once_with(flags=(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
            fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dltfile, 0, flags=(
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))

        mock_size.assert_called_once_with(srcfile, format="qcow2")
        mock_backing.assert_called_once_with(srcfile, basename=False,
                                             format="qcow2")
        mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
        mock_chown.assert_called_once_with(dltfile, os.getuid())
        mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                              dstfile, "qcow2")
        mock_define.assert_called_once_with(xmldoc)
        # Quiesce is always attempted on entry (freeze=True) ...
        mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                     mock.ANY, True)
        if can_quiesce:
            # ... and undone on exit only if it succeeded.
            mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                         mock.ANY, False)
def test_live_snapshot(self):
    # Guest cannot quiesce, image does not require it: still succeeds.
    self._test_live_snapshot()

def test_live_snapshot_with_quiesce(self):
    self._test_live_snapshot(can_quiesce=True)

def test_live_snapshot_with_require_quiesce(self):
    self._test_live_snapshot(can_quiesce=True, require_quiesce=True)

def test_live_snapshot_with_require_quiesce_fails(self):
    # Image requires quiesce but the guest cannot: must raise.
    self.assertRaises(exception.InstanceQuiesceNotSupported,
                      self._test_live_snapshot,
                      can_quiesce=False, require_quiesce=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
def test_live_migration_hostname_valid(self, mock_lm):
    """A valid destination hostname lets migration proceed."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.live_migration(self.context, self.test_instance,
                        "host1.example.com",
                        lambda x: x,
                        lambda x: x)
    self.assertEqual(1, mock_lm.call_count)

@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
@mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
    """An invalid destination hostname raises InvalidHostname."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_hostname.return_value = False
    self.assertRaises(exception.InvalidHostname,
                      drvr.live_migration,
                      self.context, self.test_instance,
                      "foo/?com=/bin/sh",
                      lambda x: x,
                      lambda x: x)

@mock.patch.object(libvirt_driver.LibvirtDriver, "pause")
def test_live_migration_force_complete(self, pause):
    """live_migration_force_complete pauses the instance."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.live_migration_force_complete(self.test_instance)
    pause.assert_called_once_with(self.test_instance)

@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def test_live_migration_abort(self, mock_abort):
    """live_migration_abort aborts the domain's running job."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
    guest = libvirt_guest.Guest(dom)
    with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest',
                           return_value=guest):
        drvr.live_migration_abort(self.test_instance)
        self.assertTrue(mock_abort.called)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close', return_value=None)
def test_check_instance_shared_storage_local_raw(self,
                                                 mock_close,
                                                 mock_mkstemp,
                                                 mock_exists):
    """With raw images a temp file is created under the instance path
    and its name returned for the shared-storage check.
    """
    instance_uuid = str(uuid.uuid4())
    self.flags(images_type='raw', group='libvirt')
    self.flags(instances_path='/tmp')
    mock_mkstemp.return_value = (-1,
                                 '/tmp/{0}/file'.format(instance_uuid))
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    temp_file = driver.check_instance_shared_storage_local(self.context,
                                                           instance)
    self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                     temp_file['filename'])

def test_check_instance_shared_storage_local_rbd(self):
    """RBD-backed instances have no local storage to check; the method
    returns None.
    """
    self.flags(images_type='rbd', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.assertIsNone(driver.
                      check_instance_shared_storage_local(self.context,
                                                          instance))
def test_version_to_string(self):
    """_version_to_string joins a (major, minor, micro) tuple with
    dots.
    """
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    string_ver = driver._version_to_string((4, 33, 173))
    self.assertEqual("4.33.173", string_ver)
def test_parallels_min_version_fail(self):
    """init_host rejects a parallels libvirt version below the
    minimum (1002011 is one less than the accepted 1002012).
    """
    self.flags(virt_type='parallels', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(driver._conn, 'getLibVersion',
                           return_value=1002011):
        self.assertRaises(exception.NovaException,
                          driver.init_host, 'wibble')

def test_parallels_min_version_ok(self):
    """init_host succeeds at exactly version 1002012."""
    self.flags(virt_type='parallels', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(driver._conn, 'getLibVersion',
                           return_value=1002012):
        driver.init_host('wibble')
    def test_get_guest_config_parallels_vm(self):
        """A parallels HVM guest with ploop images gets the expected guest
        config: root + ephemeral ploop disks, an interface, input, graphics
        and video devices, in that order.
        """
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        # Device order is part of the generated XML contract.
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
    def test_get_guest_config_parallels_ct(self):
        """A parallels container (EXE mode) guest is booted from a ploop
        filesystem mounted at '/' with /sbin/init, not from disks.
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vm_mode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {'disk': {}}})
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        # Containers get a filesystem, interface, graphics and video device.
        self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        fs = cfg.devices[0]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestVideo)
    def _test_get_guest_config_parallels_volume(self, vmmode, devices):
        """Common assertions for a parallels guest with an attached volume.

        Builds a guest config with one volume BDM mapped to /dev/sda and
        checks that the volume shows up as a disk device (never a
        filesystem), regardless of VM vs. container mode.

        :param vmmode: vm_mode.EXE or vm_mode.HVM
        :param devices: expected total device count for that mode
        """
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vmmode
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        conn_info = {'driver_volume_type': 'fake'}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 0,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sda'}))
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            [bdm])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        # Attaching the connection_info dirties the BDM, so save() is
        # expected to be called exactly once while building the config.
        with mock.patch.object(
                        driver_block_device.DriverVolumeBlockDevice, 'save'
                    ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref,
                                         _fake_network_info(self, 1),
                                         image_meta, disk_info, None, info)
            mock_save.assert_called_once_with()
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vmmode, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(devices, len(cfg.devices))
        disk_found = False
        for dev in cfg.devices:
            # The volume must never be exposed as a filesystem device.
            result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
            self.assertFalse(result)
            # Find the volume disk (not the local ephemeral 'disk.local').
            if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
                (dev.source_path is None or
                   'disk.local' not in dev.source_path)):
                self.assertEqual("disk", dev.source_device)
                self.assertEqual("sda", dev.target_dev)
                disk_found = True
        self.assertTrue(disk_found)
def test_get_guest_config_parallels_volume(self):
self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4)
self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6)
    def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self):
        # New config drives are stored in rbd but existing instances have
        # config drives in the old location under the instances path.
        # Test that the driver falls back to 'raw' for config drive if it
        # doesn't exist in rbd.
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        mock_rbd_image = mock.Mock()
        mock_raw_image = mock.Mock()
        mock_raw_image.libvirt_info.return_value = mock.sentinel.diskconfig
        # First image() call yields the rbd backend, second the raw one.
        drvr.image_backend.image.side_effect = [mock_rbd_image,
                                                mock_raw_image]
        # Simulate the config drive being absent from rbd.
        mock_rbd_image.check_image_exists.return_value = False
        instance = objects.Instance()
        disk_mapping = {'disk.config': {'bus': 'ide',
                                        'dev': 'hdd',
                                        'type': 'file'}}
        flavor = objects.Flavor(extra_specs={})
        diskconfig = drvr._get_guest_disk_config(
            instance, 'disk.config', disk_mapping, flavor,
            drvr._get_disk_config_image_type())
        # rbd is tried first, then the raw fallback.
        self.assertEqual(2, drvr.image_backend.image.call_count)
        call1 = mock.call(instance, 'disk.config', 'rbd')
        call2 = mock.call(instance, 'disk.config', 'raw')
        drvr.image_backend.image.assert_has_calls([call1, call2])
        self.assertEqual(mock.sentinel.diskconfig, diskconfig)
def _test_prepare_domain_for_snapshot(self, live_snapshot, state):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance_ref = objects.Instance(**self.test_instance)
with mock.patch.object(drvr, "suspend") as mock_suspend:
drvr._prepare_domain_for_snapshot(
self.context, live_snapshot, state, instance_ref)
return mock_suspend.called
def test_prepare_domain_for_snapshot(self):
# Ensure that suspend() is only called on RUNNING or PAUSED instances
for test_power_state in power_state.STATE_MAP.keys():
if test_power_state in (power_state.RUNNING, power_state.PAUSED):
self.assertTrue(self._test_prepare_domain_for_snapshot(
False, test_power_state))
else:
self.assertFalse(self._test_prepare_domain_for_snapshot(
False, test_power_state))
def test_prepare_domain_for_snapshot_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
# Ensure that suspend() is never called with LXC
for test_power_state in power_state.STATE_MAP.keys():
self.assertFalse(self._test_prepare_domain_for_snapshot(
False, test_power_state))
def test_prepare_domain_for_snapshot_live_snapshots(self):
# Ensure that suspend() is never called for live snapshots
for test_power_state in power_state.STATE_MAP.keys():
self.assertFalse(self._test_prepare_domain_for_snapshot(
True, test_power_state))
class HostStateTestCase(test.NoDBTestCase):
    """Verify get_available_resource() aggregates host statistics.

    A FakeConnection driver overrides every host-probing helper with canned
    data, so the assertions exercise only the aggregation/serialization in
    LibvirtDriver.get_available_resource().
    """

    # Canned host facts the fake driver returns.
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge",
                "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}

    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]

    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(
                        cells=[objects.NUMACell(
                            id=1, cpuset=set([1, 2]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([])),
                               objects.NUMACell(
                            id=2, cpuset=set([3, 4]), memory=1024,
                            cpu_usage=0, memory_usage=0,
                            mempages=[], siblings=[],
                            pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            # Patch the Host object directly; these are instance-level
            # overrides, hence no 'self' parameter.
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up  1:36,  6 users,  '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        # cpu_info is serialized to JSON; arch is canonicalized to the
        # nova arch constant.
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel", "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                 })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        """Create a read-only libvirt driver and minimal raw image meta."""
        super(LibvirtDriverTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }
    def _create_instance(self, params=None):
        """Create a test instance.

        :param params: optional dict of instance fields to override the
                       defaults (10G root / 20G ephemeral m1.tiny flavor).
        :returns: an objects.Instance (not persisted).
        """
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1)

        inst = {}
        inst['id'] = 1
        inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
        inst['os_type'] = 'linux'
        inst['image_ref'] = '1'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}
        inst['task_state'] = None

        inst.update(params)

        return objects.Instance(flavor=flavor,
                                old_flavor=None, new_flavor=None,
                                **inst)
@staticmethod
def _disk_info(type='qcow2', config_disk=False):
# 10G root and 512M swap disk
disk_info = [{'disk_size': 1, 'type': type,
'virt_disk_size': 10737418240, 'path': '/test/disk',
'backing_file': '/base/disk'},
{'disk_size': 1, 'type': type,
'virt_disk_size': 536870912, 'path': '/test/disk.swap',
'backing_file': '/base/swap_512'}]
if config_disk:
disk_info.append({'disk_size': 1, 'type': 'raw',
'virt_disk_size': 1024,
'path': '/test/disk.config'})
return jsonutils.dumps(disk_info)
def test_migrate_disk_and_power_off_exception(self):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.migrate_disk_and_power_off.
"""
self.counter = 0
self.checked_shared_storage = False
def fake_get_instance_disk_info(instance,
block_device_info=None):
return '[]'
def fake_destroy(instance):
pass
def fake_get_host_ip_addr():
return '10.0.0.1'
def fake_execute(*args, **kwargs):
self.counter += 1
if self.counter == 1:
assert False, "intentional failure"
def fake_os_path_exists(path):
return True
def fake_is_storage_shared(dest, inst_base):
self.checked_shared_storage = True
return False
self.stubs.Set(self.drvr, 'get_instance_disk_info',
fake_get_instance_disk_info)
self.stubs.Set(self.drvr, '_destroy', fake_destroy)
self.stubs.Set(self.drvr, 'get_host_ip_addr',
fake_get_host_ip_addr)
self.stubs.Set(self.drvr, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
self.stub_out('os.path.exists', fake_os_path_exists)
ins_ref = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self.assertRaises(AssertionError,
self.drvr.migrate_disk_and_power_off,
context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None)
    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Runs the migration twice - once to a different host and once to the
        same host - and checks the returned disk info both times.
        """

        disk_info = self._disk_info()

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        def fake_copy_image(src, dest, host=None, receive=False,
                            on_execute=None, on_completion=None,
                            compression=True):
            # The driver must pass progress callbacks to copy_image.
            self.assertIsNotNone(on_execute)
            self.assertIsNotNone(on_completion)

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image)

        ins_ref = self._create_instance(params=params_for_instance)

        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.2',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), ins_ref, '10.0.0.1',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(flavor_obj)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        """Boot-from-volume instances must have the boot volume
        disconnected during migrate_disk_and_power_off.
        """
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        The swap disk must be skipped (neither copied nor moved) when
        resizing, since it is recreated on the destination.
        """
        self.copy_or_move_swap_called = False

        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'

        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True

        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True

        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})

        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)

        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)

        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)
    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Assert that the resize pre-check raises ``expected_exc``.
        """

        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return self._disk_info()

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    def _test_migrate_disk_and_power_off_backing_file(self,
                                                      shared_storage,
                                                      mock_is_shared_storage,
                                                      mock_get_disk_info,
                                                      mock_destroy,
                                                      mock_execute):
        """A qcow2 disk with a backing file must not be flattened with
        'qemu-img convert' during migrate, shared storage or not.
        """
        self.convert_file_called = False
        flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0}
        flavor_obj = objects.Flavor(**flavor)
        disk_info = [{'type': 'qcow2', 'path': '/test/disk',
                      'virt_disk_size': '10737418240',
                      'backing_file': '/base/disk',
                      'disk_size': '83886080'}]
        disk_info_text = jsonutils.dumps(disk_info)
        mock_get_disk_info.return_value = disk_info_text
        mock_is_shared_storage.return_value = shared_storage

        def fake_execute(*args, **kwargs):
            # Fail the test if the disk is converted (flattened).
            self.assertNotEqual(args[0:2], ['qemu-img', 'convert'])

        mock_execute.side_effect = fake_execute

        instance = self._create_instance()

        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.2',
               flavor_obj, None)

        self.assertTrue(mock_is_shared_storage.called)
        mock_destroy.assert_called_once_with(instance)
        self.assertEqual(out, disk_info_text)
def test_migrate_disk_and_power_off_shared_storage(self):
self._test_migrate_disk_and_power_off_backing_file(True)
def test_migrate_disk_and_power_off_non_shared_storage(self):
self._test_migrate_disk_and_power_off_backing_file(False)
def test_migrate_disk_and_power_off_lvm(self):
self.flags(images_type='lvm', group='libvirt')
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(utils, 'execute', fake_execute)
expected_exc = exception.InstanceFaultRollback
self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        """If the destination host is unreachable over ssh, the failure is
        wrapped in InstanceFaultRollback.
        """
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)

        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
        """Shrinking the root disk (10G -> 5G) must be rejected with
        InstanceFaultRollback.
        """
        instance = self._create_instance()
        flavor = {'root_gb': 5, 'ephemeral_gb': 10}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
            self, mock_get_disk_info):
        # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
        # Shrinking ephemeral (20G -> 0G) must be rejected.
        instance = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(exception.InstanceFaultRollback,
                          self.drvr.migrate_disk_and_power_off,
                          'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        """Ephemeral BDM sizes are checked against the target flavor: a
        target smaller than the real ephemeral disk fails, a larger one
        succeeds.
        """
        # Mixed BDM list; only the 'blank'/'local' entries count as
        # ephemeral disks for the size check.
        mappings = [
            {
                 'device_name': '/dev/sdb4',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'guest_format': 'swap',
                 'boot_index': -1,
                 'volume_size': 1
             },
             {
                 'device_name': '/dev/sda1',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'device_type': 'disk',
                 'volume_id': 1,
                 'guest_format': None,
                 'boot_index': 1,
                 'volume_size': 6
             },
             {
                 'device_name': '/dev/sda2',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'snapshot_id': 1,
                 'device_type': 'disk',
                 'guest_format': None,
                 'boot_index': 0,
                 'volume_size': 4
             },
             {
                 'device_name': '/dev/sda3',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'device_type': 'disk',
                 'guest_format': None,
                 'boot_index': -1,
                 'volume_size': 3
             }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()

        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()

        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)

        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '._is_storage_shared_with')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_resize_copy_disk_info(self,
                                                              mock_disk_info,
                                                              mock_shared,
                                                              mock_path,
                                                              mock_destroy,
                                                              mock_copy,
                                                              mock_execuate):
        """When a disk.info exists in the renamed '<base>_resize' source
        dir, it must be copied to the destination instance dir.
        """

        instance = self._create_instance()
        disk_info = self._disk_info()
        disk_info_text = jsonutils.loads(disk_info)
        instance_base = os.path.dirname(disk_info_text[0]['path'])
        flavor = {'root_gb': 10, 'ephemeral_gb': 25}
        flavor_obj = objects.Flavor(**flavor)

        mock_disk_info.return_value = disk_info
        mock_path.return_value = instance_base
        mock_shared.return_value = False

        src_disk_info_path = os.path.join(instance_base + '_resize',
                                          'disk.info')

        with mock.patch.object(os.path, 'exists', autospec=True) \
                as mock_exists:
            # disk.info exists on the source
            mock_exists.side_effect = \
                lambda path: path == src_disk_info_path
            self.drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                                 instance, mock.sentinel,
                                                 flavor_obj, None)
            self.assertTrue(mock_exists.called)

        dst_disk_info_path = os.path.join(instance_base, 'disk.info')
        mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path,
                                  host=mock.sentinel, on_execute=mock.ANY,
                                  on_completion=mock.ANY)
    def test_wait_for_running(self):
        """_wait_for_running raises InstanceNotFound for missing guests,
        LoopingCallDone once RUNNING, and returns quietly otherwise so the
        looping call retries.
        """
        def fake_get_info(instance):
            if instance['name'] == "not_found":
                raise exception.InstanceNotFound(instance_id=instance['uuid'])
            elif instance['name'] == "running":
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        # instance not found case
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr._wait_for_running,
                          {'name': 'not_found',
                           'uuid': 'not_found_uuid'})

        # instance is running case
        self.assertRaises(loopingcall.LoopingCallDone,
                          self.drvr._wait_for_running,
                          {'name': 'running',
                           'uuid': 'running_uuid'})

        # else case
        self.drvr._wait_for_running({'name': 'else',
                                     'uuid': 'other_uuid'})
    def test_disk_size_from_instance_disk_info(self):
        """_disk_size_from_instance maps 'disk' to root_gb, 'disk.local' to
        ephemeral_gb, and anything else (e.g. swap) to 0.
        """
        instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
        inst = objects.Instance(**instance_data)
        self.assertEqual(10 * units.Gi,
                         self.drvr._disk_size_from_instance(inst, 'disk'))

        self.assertEqual(20 * units.Gi,
                         self.drvr._disk_size_from_instance(inst,
                                                            'disk.local'))

        self.assertEqual(0,
                         self.drvr._disk_size_from_instance(inst, 'disk.swap'))
@mock.patch('nova.utils.execute')
def test_disk_raw_to_qcow2(self, mock_execute):
path = '/test/disk'
_path_qcow = path + '_qcow'
self.drvr._disk_raw_to_qcow2(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, _path_qcow),
mock.call('mv', _path_qcow, path)])
@mock.patch('nova.utils.execute')
def test_disk_qcow2_to_raw(self, mock_execute):
path = '/test/disk'
_path_raw = path + '_raw'
self.drvr._disk_qcow2_to_raw(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, _path_raw),
mock.call('mv', _path_raw, path)])
@mock.patch('nova.virt.disk.api.extend')
def test_disk_resize_raw(self, mock_extend):
image = imgmodel.LocalFileImage("/test/disk",
imgmodel.FORMAT_RAW)
self.drvr._disk_resize(image, 50)
mock_extend.assert_called_once_with(image, 50)
    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):
        """qcow2 images are converted to raw, extended, then converted
        back to qcow2.
        """

        with test.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
        as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True

            imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                                 imgmodel.FORMAT_QCOW2)
            imageraw = imgmodel.LocalFileImage("/test/disk",
                                               imgmodel.FORMAT_RAW)
            self.drvr._disk_resize(imageqcow2, 50)

            # extend() is given the intermediate raw image, not the qcow2.
            mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
            mock_extend.assert_called_once_with(imageraw, 50)
            mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .finish_migration.

        Stubs every driver collaborator and checks that finish_migration
        converts raw disks (except the config disk) to qcow2, resizes only
        when requested, and powers the guest on/off as asked.
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False
        create_image_called = [False]

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""

        def fake_plug_vifs(instance, network_info):
            pass

        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            self.assertFalse(inject_files)
            create_image_called[0] = True

        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)

        def fake_enable_hairpin():
            pass

        def fake_execute(*args, **kwargs):
            pass

        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_disk_resize(image, size):
            # Assert that _create_image is called before disk resize,
            # otherwise we might be trying to resize a disk whose backing
            # file hasn't been fetched, yet.
            self.assertTrue(create_image_called[0])
            self.fake_disk_resize_called = True

        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)

        ins_ref = self._create_instance()
        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        # Source disks are raw to test conversion
        disk_info = self._disk_info(type='raw', config_disk=True)

        with mock.patch.object(self.drvr, '_disk_raw_to_qcow2',
                               autospec=True) as mock_raw_to_qcow2:
            self.drvr.finish_migration(
                          context.get_admin_context(), migration, ins_ref,
                          disk_info, [], image_meta,
                          resize_instance, None, power_on)

            # Assert that we converted the root and swap disks
            convert_calls = [mock.call('/test/disk'),
                             mock.call('/test/disk.swap')]
            mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True)

            # Implicitly assert that we did not convert the config disk
            self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count)

        self.assertTrue(self.fake_create_domain_called)
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
def test_finish_migration_resize(self):
self._test_finish_migration(True, resize_instance=True)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
def _test_finish_revert_migration(self, power_on):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_revert_migration.
"""
powered_on = power_on
self.fake_create_domain_called = False
def fake_execute(*args, **kwargs):
pass
def fake_plug_vifs(instance, network_info):
pass
def fake_create_domain(context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=None,
vifs_already_plugged=None):
self.fake_create_domain_called = True
self.assertEqual(powered_on, power_on)
self.assertTrue(vifs_already_plugged)
return mock.MagicMock()
def fake_enable_hairpin():
pass
def fake_get_info(instance):
if powered_on:
return hardware.InstanceInfo(state=power_state.RUNNING)
else:
return hardware.InstanceInfo(state=power_state.SHUTDOWN)
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None):
return ""
self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(utils, 'execute', fake_execute)
fw = base_firewall.NoopFirewallDriver()
self.stubs.Set(self.drvr, 'firewall_driver', fw)
self.stubs.Set(self.drvr, '_create_domain_and_network',
fake_create_domain)
self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
fake_enable_hairpin)
self.stubs.Set(self.drvr, 'get_info',
fake_get_info)
self.stubs.Set(utils, 'get_image_from_system_metadata',
lambda *a: self.test_image_meta)
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
ins_ref = self._create_instance()
os.mkdir(os.path.join(tmpdir, ins_ref['name']))
libvirt_xml_path = os.path.join(tmpdir,
ins_ref['name'],
'libvirt.xml')
f = open(libvirt_xml_path, 'w')
f.close()
self.drvr.finish_revert_migration(
context.get_admin_context(), ins_ref,
[], None, power_on)
self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """Common checks for finish_revert_migration after a crash.

        :param backup_made: whether the '<path>_resize' backup directory
            still exists when the revert runs.
        :param del_inst_failed: make removal of the half-written instance
            directory fail; the failure must be tolerated.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        # NOTE(review): this local shadows the module-level ``context``
        # import for the remainder of this method.
        context = 'fake_context'
        instance = self._create_instance()
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')
        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml',
                       lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        # mox expectations: if the pre-resize backup exists the stale
        # directory is removed (best-effort) and the backup is moved back.
        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            if del_inst_failed:
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')
        imagebackend.Backend.image(mox.IgnoreArg(), 'disk').AndReturn(
            fake_imagebackend.Raw())
        self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists')
        fake_imagebackend.Raw.check_image_exists().AndReturn(True)
        self.mox.ReplayAll()
        self.drvr.finish_revert_migration(context, instance, [])
    def test_finish_revert_migration_after_crash(self):
        """Crash recovery when the '_resize' backup directory exists."""
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_new(self):
        # NOTE(review): identical arguments to the plain after-crash test;
        # a crash before the new directory was created is presumably
        # indistinguishable at this level because the backup still exists
        # in both cases — confirm against the driver implementation.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_backup(self):
        """Crash recovery when no '_resize' backup was ever made."""
        self._test_finish_revert_migration_after_crash(backup_made=False)
    def test_finish_revert_migration_after_crash_delete_failed(self):
        """Failure to delete the stale directory must not break the revert."""
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)
    def test_finish_revert_migration_preserves_disk_bus(self):
        """The hw_disk_bus image property must survive a revert."""
        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            # The bus from the image metadata must reach the disk_info the
            # guest XML is built from.
            self.assertEqual('ide', disk_info['disk_bus'])
        image_meta = {"disk_format": "raw",
                      "properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)
    def test_finish_revert_migration_snap_backend(self):
        """Reverting rolls back to the resize snapshot and removes it."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Make image() hand back the backend mock itself so the snapshot
        # calls can be asserted directly on it.
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        ins_ref = self._create_instance()
        with test.nested(
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)
            drvr.image_backend.rollback_to_snap.assert_called_once_with(
                libvirt_utils.RESIZE_SNAPSHOT_NAME)
            drvr.image_backend.remove_snap.assert_called_once_with(
                libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.image_backend = mock.Mock()
drvr.image_backend.image.return_value = drvr.image_backend
ins_ref = self._create_instance()
with test.nested(
mock.patch.object(rbd_utils, 'RBDDriver'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(drvr, '_create_domain_and_network'),
mock.patch.object(drvr, '_get_guest_xml')) as (
mock_rbd, mock_image, mock_cdn, mock_ggx):
mock_image.return_value = {'disk_format': 'raw'}
mock_rbd.rollback_to_snap.side_effect = exception.SnapshotNotFound(
snapshot_id='testing')
drvr.finish_revert_migration('', ins_ref, None, power_on=False)
drvr.image_backend.remove_snap.assert_called_once_with(
libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
    def test_finish_revert_migration_snap_backend_image_does_not_exist(self):
        """No snapshot handling when the backing image has vanished."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        # Image gone: neither rollback nor snapshot removal may be tried.
        drvr.image_backend.check_image_exists.return_value = False
        ins_ref = self._create_instance()
        with test.nested(
                mock.patch.object(rbd_utils, 'RBDDriver'),
                mock.patch.object(utils, 'get_image_from_system_metadata'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(drvr, '_get_guest_xml')) as (
                mock_rbd, mock_image, mock_cdn, mock_ggx):
            mock_image.return_value = {'disk_format': 'raw'}
            drvr.finish_revert_migration('', ins_ref, None, power_on=False)
            self.assertFalse(drvr.image_backend.rollback_to_snap.called)
            self.assertFalse(drvr.image_backend.remove_snap.called)
    def test_cleanup_failed_migration(self):
        """_cleanup_failed_migration simply removes the given directory."""
        self.mox.StubOutWithMock(shutil, 'rmtree')
        shutil.rmtree('/fake/inst')
        self.mox.ReplayAll()
        self.drvr._cleanup_failed_migration('/fake/inst')
    def test_confirm_migration(self):
        """confirm_migration delegates straight to _cleanup_resize."""
        ins_ref = self._create_instance()
        self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self, 1))
        self.mox.ReplayAll()
        self.drvr.confirm_migration("migration_ref", ins_ref,
                                    _fake_network_info(self, 1))
    def test_cleanup_resize_same_host(self):
        """Resize cleanup when the instance stayed on this host."""
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        def fake_os_path_exists(path):
            return True
        self.stub_out('os.path.exists', fake_os_path_exists)
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        # forceold=True resolves the pre-resize directory name; the
        # '<path>_resize' backup is then removed with retries.
        libvirt_utils.get_instance_path(ins_ref,
                                        forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
            fake_imagebackend.Raw())
        self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists')
        fake_imagebackend.Raw.check_image_exists().AndReturn(True)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self, 1))
    def test_cleanup_resize_not_same_host(self):
        """Resize cleanup when the instance has moved to another host."""
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        host = 'not' + CONF.host
        ins_ref = self._create_instance({'host': host})
        def fake_os_path_exists(path):
            return True
        # The extra teardown paths taken when the instance lives elsewhere
        # are stubbed out so they cannot touch the system.
        def fake_undefine_domain(instance):
            pass
        def fake_unplug_vifs(instance, network_info, ignore_errors=False):
            pass
        def fake_unfilter_instance(instance, network_info):
            pass
        self.stub_out('os.path.exists', fake_os_path_exists)
        self.stubs.Set(self.drvr, '_undefine_domain',
                       fake_undefine_domain)
        self.stubs.Set(self.drvr, 'unplug_vifs',
                       fake_unplug_vifs)
        self.stubs.Set(self.drvr.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        libvirt_utils.get_instance_path(ins_ref,
                                        forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
            fake_imagebackend.Raw())
        self.mox.StubOutWithMock(fake_imagebackend.Raw, 'check_image_exists')
        fake_imagebackend.Raw.check_image_exists().AndReturn(True)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self, 1))
    def test_cleanup_resize_snap_backend(self):
        """Same-host resize cleanup also drops the resize snapshot."""
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr.image_backend, 'remove_snap')) as (
                mock_exists, mock_get_path, mock_exec, mock_remove):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref, forceold=True)
            mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize',
                                              delay_on_retry=True, attempts=5)
            mock_remove.assert_called_once_with(
                libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
    def test_cleanup_resize_snap_backend_image_does_not_exist(self):
        """No snapshot removal when the backing image no longer exists."""
        CONF.set_override('policy_dirs', [], group='oslo_policy')
        ins_ref = self._create_instance({'host': CONF.host})
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        drvr.image_backend.image.return_value = drvr.image_backend
        # Image gone: the directory cleanup still happens, remove_snap not.
        drvr.image_backend.check_image_exists.return_value = False
        with test.nested(
                mock.patch.object(os.path, 'exists'),
                mock.patch.object(libvirt_utils, 'get_instance_path'),
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr.image_backend, 'remove_snap')) as (
                mock_exists, mock_get_path, mock_exec, mock_remove):
            mock_exists.return_value = True
            mock_get_path.return_value = '/fake/inst'
            drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
            mock_get_path.assert_called_once_with(ins_ref, forceold=True)
            mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize',
                                              delay_on_retry=True, attempts=5)
            self.assertFalse(mock_remove.called)
    def test_get_instance_disk_info_exception(self):
        """A libvirt error reading the domain XML surfaces as
        InstanceNotFound from get_instance_disk_info.
        """
        instance = self._create_instance()
        class FakeExceptionDomain(FakeVirtDomain):
            def __init__(self):
                super(FakeExceptionDomain, self).__init__()
            def XMLDesc(self, flags):
                # Simulate libvirt failing while the driver inspects disks.
                raise fakelibvirt.libvirtError("Libvirt error")
        def fake_get_domain(self, instance):
            return FakeExceptionDomain()
        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr.get_instance_disk_info,
                          instance)
    @mock.patch('os.path.exists')
    @mock.patch.object(lvm, 'list_volumes')
    def test_lvm_disks(self, listlvs, exists):
        """Only LVs whose name starts with the instance uuid are returned."""
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.flags(images_volume_group='vols', group='libvirt')
        exists.return_value = True
        listlvs.return_value = ['fake-uuid_foo',
                                'other-uuid_foo']
        disks = self.drvr._lvm_disks(instance)
        self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
def test_is_booted_from_volume(self):
func = libvirt_driver.LibvirtDriver._is_booted_from_volume
instance, disk_mapping = {}, {}
self.assertTrue(func(instance, disk_mapping))
disk_mapping['disk'] = 'map'
self.assertTrue(func(instance, disk_mapping))
instance['image_ref'] = 'uuid'
self.assertFalse(func(instance, disk_mapping))
    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
    def _test_inject_data(self, driver_params, path, disk_params,
                          mock_conn, disk_inject_data, inj_network,
                          called=True):
        """Drive LibvirtDriver._inject_data and check what is forwarded to
        nova.virt.disk.api.inject_data.

        :param path: backing file path; '/fail/path' simulates a missing
            image, which must suppress injection entirely.
        :param disk_params: positional args expected on inject_data.
        :param called: whether inject_data is expected to be invoked.
        """
        class ImageBackend(object):
            path = '/path'
            def check_image_exists(self):
                # '/fail/path' marks the missing-image scenario.
                if self.path == '/fail/path':
                    return False
                return True
            def get_model(self, connection):
                return imgmodel.LocalFileImage(self.path,
                                               imgmodel.FORMAT_RAW)
        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network
        image_backend = ImageBackend()
        image_backend.path = path
        with mock.patch.object(
                self.drvr.image_backend,
                'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')
            self.drvr._inject_data(**driver_params)
            if called:
                disk_inject_data.assert_called_once_with(
                    mock.ANY,
                    *disk_params,
                    partition=None, mandatory=('files',))
            self.assertEqual(disk_inject_data.called, called)
def _test_inject_data_default_driver_params(self, **params):
return {
'instance': self._create_instance(params=params),
'network_info': None,
'admin_pass': None,
'files': None,
'suffix': ''
}
    def test_inject_data_adminpass(self):
        """Password injection honours the inject_password flag."""
        self.flags(inject_password=True, group='libvirt')
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['admin_pass'] = 'foobar'
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            'foobar',  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
        # Test with the configuration set to false.
        self.flags(inject_password=False, group='libvirt')
        self._test_inject_data(driver_params, "/path",
                               disk_params, called=False)
    def test_inject_data_key(self):
        """SSH key injection honours the inject_key flag."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['instance']['key_data'] = 'key-content'
        self.flags(inject_key=True, group='libvirt')
        disk_params = [
            'key-content',  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
        # Test with the configuration set to false.
        self.flags(inject_key=False, group='libvirt')
        self._test_inject_data(driver_params, "/path",
                               disk_params, called=False)
def test_inject_data_metadata(self):
instance_metadata = {'metadata': {'data': 'foo'}}
driver_params = self._test_inject_data_default_driver_params(
**instance_metadata
)
disk_params = [
None, # key
None, # net
{'data': 'foo'}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_data_files(self):
        """Personality files are forwarded to disk.api.inject_data."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['files'] = ['file1', 'file2']
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            ['file1', 'file2'],  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_data_net(self):
        """Network info is forwarded to disk.api.inject_data."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['network_info'] = {'net': 'eno1'}
        disk_params = [
            None,  # key
            {'net': 'eno1'},  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_not_exist_image(self):
        """No injection at all when the backing image does not exist."""
        driver_params = self._test_inject_data_default_driver_params()
        # NOTE(review): disk_params is irrelevant here since called=False
        # asserts inject_data is never invoked; 'key-content' is presumably
        # a leftover from copying test_inject_data_key.
        disk_params = [
            'key-content',  # key
            None,  # net
            None,  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/fail/path",
                               disk_params, called=False)
    def _test_attach_detach_interface(self, method, power_state,
                                      expected_flags):
        """Common driver for attach_interface/detach_interface tests.

        :param method: 'attach_interface' or 'detach_interface'.
        :param power_state: domain power state reported by dom.info().
        :param expected_flags: VIR_DOMAIN_AFFECT_* flags expected on the
            libvirt attach/detach call for that power state.
        """
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        domain = FakeVirtDomain()
        self.mox.StubOutWithMock(host.Host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
        self.mox.StubOutWithMock(domain, 'info')
        host.Host.get_domain(instance).AndReturn(domain)
        if method == 'attach_interface':
            self.drvr.firewall_driver.setup_basic_filtering(
                instance, [network_info[0]])
        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance.image_ref})
        # Capture the real vif config before get_config is stubbed, so the
        # XML passed to libvirt can be compared against it.
        expected = self.drvr.vif_driver.get_config(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type, self.drvr._host)
        self.mox.StubOutWithMock(self.drvr.vif_driver,
                                 'get_config')
        self.drvr.vif_driver.get_config(
            instance, network_info[0],
            mox.IsA(objects.ImageMeta),
            mox.IsA(objects.Flavor),
            CONF.libvirt.virt_type,
            self.drvr._host).AndReturn(expected)
        domain.info().AndReturn([power_state, 1, 2, 3, 4])
        if method == 'attach_interface':
            domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
        elif method == 'detach_interface':
            domain.detachDeviceFlags(expected.to_xml(), expected_flags)
        self.mox.ReplayAll()
        if method == 'attach_interface':
            self.drvr.attach_interface(
                instance, fake_image_meta, network_info[0])
        elif method == 'detach_interface':
            self.drvr.detach_interface(
                instance, network_info[0])
        self.mox.VerifyAll()
def test_attach_interface_with_running_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_pause_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_shutdown_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
def test_detach_interface_with_running_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_pause_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_shutdown_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_detach_interface_device_not_found(self, mock_log):
        """Detach must warn, not error, if the device already disappeared."""
        # Asserts that we don't log an error when the interface device is not
        # found on the guest after a libvirt error during detach.
        instance = self._create_instance()
        vif = _fake_network_info(self, 1)[0]
        guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
        guest.get_power_state = mock.Mock()
        self.drvr._host.get_guest = mock.Mock(return_value=guest)
        self.drvr.vif_driver = mock.Mock()
        # Simulate libvirt failing the detach because the device is gone.
        error = fakelibvirt.libvirtError(
            'no matching network device was found')
        error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
        guest.detach_device = mock.Mock(side_effect=error)
        # mock out that get_interface_by_mac doesn't find the interface
        guest.get_interface_by_mac = mock.Mock(return_value=None)
        self.drvr.detach_interface(instance, vif)
        guest.get_interface_by_mac.assert_called_once_with(vif['address'])
        # an error shouldn't be logged, but a warning should be logged
        self.assertFalse(mock_log.error.called)
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn('the device is no longer found on the guest',
                      six.text_type(mock_log.warning.call_args[0]))
    def test_rescue(self):
        """rescue() builds rescue images, regenerates XML and reboots
        the guest into the rescue configuration.
        """
        instance = self._create_instance({'config_drive': None})
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self, 1)
        self.mox.StubOutWithMock(self.drvr,
                                 '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                    mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        # Two writes: the saved unrescue XML and the new rescue XML.
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        # Rescue kernel/ramdisk/disk images are created through the backend.
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 size=None, user_id=mox.IgnoreArg())
        image_meta = objects.ImageMeta.from_dict(
            {'id': 'fake', 'name': 'fake'})
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                 network_info, mox.IgnoreArg(),
                                 image_meta,
                                 rescue=mox.IgnoreArg(),
                                 write_to_disk=mox.IgnoreArg()
                                 ).AndReturn(dummyxml)
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance,
                         network_info, image_meta, rescue_password)
        self.mox.VerifyAll()
@mock.patch.object(libvirt_utils, 'get_instance_path')
@mock.patch.object(libvirt_utils, 'load_file')
@mock.patch.object(host.Host, "get_domain")
def test_unrescue(self, mock_get_domain, mock_load_file,
mock_get_instance_path):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='block' device='disk'>"
"<source dev='/dev/some-vg/some-lv'/>"
"<target dev='vda' bus='virtio'/></disk>"
"</devices></domain>")
mock_get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake=uuid', id=1)
fake_dom = FakeVirtDomain(fake_xml=dummyxml)
mock_get_domain.return_value = fake_dom
mock_load_file.return_value = "fake_unrescue_xml"
unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
xml_path = os.path.join('/path', 'libvirt.xml')
rescue_file = os.path.join('/path', 'rescue.file')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
mock.patch.object(libvirt_utils, 'write_to_file'),
mock.patch.object(drvr, '_destroy'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(libvirt_utils, 'file_delete'),
mock.patch.object(drvr, '_lvm_disks',
return_value=['lvm.rescue']),
mock.patch.object(lvm, 'remove_volumes'),
mock.patch.object(glob, 'iglob', return_value=[rescue_file])
) as (mock_write, mock_destroy, mock_create, mock_del,
mock_lvm_disks, mock_remove_volumes, mock_glob):
drvr.unrescue(instance, None)
mock_write.assert_called_once_with(xml_path, "fake_unrescue_xml")
mock_destroy.assert_called_once_with(instance)
mock_create.assert_called_once_with("fake_unrescue_xml",
fake_dom)
self.assertEqual(2, mock_del.call_count)
self.assertEqual(unrescue_xml_path,
mock_del.call_args_list[0][0][0])
self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
    @mock.patch(
        'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
    def test_rescue_config_drive(self, mock_make, mock_add):
        """rescue() of a config-drive instance also rebuilds the config
        drive as 'disk.config.rescue' inside the instance directory.
        """
        instance = self._create_instance()
        uuid = instance.uuid
        configdrive_path = uuid + '/disk.config.rescue'
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self, 1)
        self.mox.StubOutWithMock(self.drvr,
                                 '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                                 '__init__')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                    mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        # Rescue images plus the rescue config drive image.
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 size=None, user_id=mox.IgnoreArg())
        instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                            content=mox.IgnoreArg(),
                                            extra_md=mox.IgnoreArg(),
                                            network_info=mox.IgnoreArg())
        image_meta = objects.ImageMeta.from_dict(
            {'id': 'fake', 'name': 'fake'})
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                 network_info, mox.IgnoreArg(),
                                 image_meta,
                                 rescue=mox.IgnoreArg(),
                                 write_to_disk=mox.IgnoreArg()
                                 ).AndReturn(dummyxml)
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance, network_info,
                         image_meta, rescue_password)
        self.mox.VerifyAll()
        mock_add.assert_any_call(mock.ANY)
        # The config drive must be written into the instance directory.
        expected_call = [mock.call(os.path.join(CONF.instances_path,
                                                configdrive_path))]
        mock_make.assert_has_calls(expected_call)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files(self, get_instance_path, exists, exe,
                                   shutil):
        """Happy path: directory renamed to '<path>_del', then removed."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # assumes delete_instance_files probes os.path.exists in this
        # order, ending False once everything is gone — TODO confirm.
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('os.kill')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_kill_running(
            self, get_instance_path, kill, exists, exe, shutil):
        """Outstanding tracked jobs are SIGKILLed before deleting files."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # Pretend two helper processes are still working on this instance.
        self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        # Each pid gets SIGKILL followed by a signal-0 liveness probe.
        kill.assert_has_calls([mock.call(3, signal.SIGKILL), mock.call(3, 0),
                               mock.call(4, signal.SIGKILL), mock.call(4, 0)])
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
        self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = [Exception(), None]
exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')]
self.assertEqual(expected, exe.mock_calls)
shutil.assert_called_with('/path_del')
self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
                                          shutil):
        """If the '_del' directory survives rmtree, deletion reports False."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # Final exists() answer True: the directory is still there.
        exists.side_effect = [False, False, True, True]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = Exception()
exists.side_effect = [True, True]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')] * 2
self.assertEqual(expected, exe.mock_calls)
self.assertFalse(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resume(self, get_instance_path, exists,
                                          exe, shutil):
        """Renames fail but the path is gone anyway: deletion succeeds."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # nova.utils.execute here is the same mock as the injected ``exe``.
        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_none(self, get_instance_path, exists,
                                        exe, shutil):
        """Nothing exists on disk: nothing is rmtree'd, result is True."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # nova.utils.execute here is the same mock as the injected ``exe``.
        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, False, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertEqual(0, len(shutil.mock_calls))
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_concurrent(self, get_instance_path, exists,
                                              exe, shutil):
        """A concurrent deleter wins the first renames; retry succeeds."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # First two renames fail (raced by another process), the retried
        # first rename then succeeds.
        nova.utils.execute.side_effect = [Exception(), Exception(), None]
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        expected.append(expected[0])
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
self.assertIsInstance(idmap, klass)
self.assertEqual(start, idmap.start)
self.assertEqual(target, idmap.target)
self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.virt_type = "lxc"
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(len(idmaps), 4)
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
self._assert_on_id_map(idmaps[2],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[3],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_get_id_maps_not_lxc(self):
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(0, len(idmaps))
def test_get_id_maps_only_uid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = []
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
def test_get_id_maps_only_gid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = []
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_instance_on_disk(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid='fake-uuid', id=1)
self.assertFalse(drvr.instance_on_disk(instance))
def test_instance_on_disk_rbd(self):
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid='fake-uuid', id=1)
self.assertTrue(drvr.instance_on_disk(instance))
def test_get_disk_xml(self):
dom_xml = """
<domain type="kvm">
<devices>
<disk type="file">
<source file="disk1_file"/>
<target dev="vda" bus="virtio"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type="block">
<source dev="/path/to/dev/1"/>
<target dev="vdb" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
diska_xml = """<disk type="file" device="disk">
<source file="disk1_file"/>
<target bus="virtio" dev="vda"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
diskb_xml = """<disk type="block" device="disk">
<source dev="/path/to/dev/1"/>
<target bus="virtio" dev="vdb"/>
</disk>"""
dom = mock.MagicMock()
dom.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(dom)
# NOTE(gcb): etree.tostring(node) returns an extra line with
# some white spaces, need to strip it.
actual_diska_xml = guest.get_disk('vda').to_xml()
self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
actual_diskb_xml = guest.get_disk('vdb').to_xml()
self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
self.assertIsNone(guest.get_disk('vdc'))
    def test_vcpu_model_from_config(self):
        """_cpu_config_to_vcpu_model converts guest CPU config to a vCPU
        model, and updates an existing model in place when one is given.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # No CPU config yields no vCPU model.
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)

        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = cpumodel.POLICY_REQUIRE
        feature2.name = 'aes'
        feature2.policy = cpumodel.POLICY_REQUIRE

        cpu.features = set([feature1, feature2])
        cpu.mode = cpumodel.MODE_CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
        self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))

        # Passing an existing model updates that same object and returns it.
        cpu.mode = cpumodel.MODE_HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)
    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    @mock.patch.object(objects.Instance, 'save')
    def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                                   mock_unfilter, mock_delete_volume,
                                   mock_get_guest, mock_get_size):
        """Full cleanup() of an instance with an encrypted LVM-backed disk
        deletes the dmcrypt volume backing that disk.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # ephemeral_key_uuid marks the instance as using disk encryption.
        instance = objects.Instance(uuid='fake-uuid', id=1,
                                    ephemeral_key_uuid='000-000-000')
        instance.system_metadata = {}
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        self.flags(images_type="lvm",
                   group='libvirt')
        # Domain XML exposing a single disk backed by a dmcrypt device.
        dom_xml = """
              <domain type="kvm">
                <devices>
                    <disk type="block">
                        <driver name='qemu' type='raw' cache='none'/>
                        <source dev="/dev/mapper/fake-dmcrypt"/>
                        <target dev="vda" bus="virtio" serial="1234"/>
                    </disk>
                </devices>
              </domain>
              """
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv.cleanup(self.context, instance, 'fake_network', destroy_vifs=False,
                    block_device_info=block_device_info)
        mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')
    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest, mock_size,
                          encrypted=False):
        """Helper: _cleanup_lvm deletes dmcrypt volumes only when the disk
        source device name ends in '-dmcrypt' (i.e. is encrypted).
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(uuid='fake-uuid', id=1,
                                    ephemeral_key_uuid='000-000-000')
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        # Encrypted disks are recognized by the '-dmcrypt' device suffix.
        dev_name = 'fake-dmcrypt' if encrypted else 'fake'
        dom_xml = """
              <domain type="kvm">
                <devices>
                    <disk type="block">
                        <driver name='qemu' type='raw' cache='none'/>
                        <source dev="/dev/mapper/%s"/>
                        <target dev="vda" bus="virtio" serial="1234"/>
                    </disk>
                </devices>
              </domain>
              """ % dev_name
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv._cleanup_lvm(instance, block_device_info)

        if encrypted:
            mock_delete_volume.assert_called_once_with(
                '/dev/mapper/fake-dmcrypt')
        else:
            self.assertFalse(mock_delete_volume.called)
def test_cleanup_lvm(self):
self._test_cleanup_lvm()
def test_cleanup_encrypted_lvm(self):
self._test_cleanup_lvm(encrypted=True)
    def test_vcpu_model_to_config(self):
        """_vcpu_model_to_cpu_config converts a VirtCPUModel (mode, feature
        policies, topology) into a guest CPU config object.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # One required and one forbidden CPU feature.
        feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                         name='sse')
        feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                           name='aes')
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                          features=[feature, feature_1],
                                          topology=topo)

        cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
        self.assertEqual(1, cpu.sockets)
        self.assertEqual(4, cpu.threads)
        self.assertEqual(2, len(cpu.features))
        # Both feature names and both policies survive the conversion.
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in cpu.features]))
        self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                              cpumodel.POLICY_FORBID]),
                         set([f.policy for f in cpu.features]))
def test_trigger_crash_dump(self):
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
instance = objects.Instance(uuid='fake-uuid', id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.drvr.trigger_crash_dump(instance)
def test_trigger_crash_dump_not_running(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'Requested operation is not valid: domain is not running',
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid='fake-uuid', id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(exception.InstanceNotRunning,
self.drvr.trigger_crash_dump, instance)
def test_trigger_crash_dump_not_supported(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'',
error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid='fake-uuid', id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(exception.TriggerCrashDumpNotSupported,
self.drvr.trigger_crash_dump, instance)
def test_trigger_crash_dump_unexpected_error(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError,
'UnexpectedError',
error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR)
mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
mock_guest.inject_nmi = mock.Mock(side_effect=ex)
instance = objects.Instance(uuid='fake-uuid', id=1)
with mock.patch.object(self.drvr._host, 'get_guest',
return_value=mock_guest):
self.assertRaises(fakelibvirt.libvirtError,
self.drvr.trigger_crash_dump, instance)
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        # Every disk reports the same fake block stats:
        # (rd_req, rd_bytes, wr_req, wr_bytes, errs).
        def fake_block_stats(instance_name, disk):
            return (169, 688640, 0, 0, -1)

        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        # A missing domain must yield an empty usage list, not an error.
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        # Serializing the driver's connection object must not raise.
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    def test_tpool_execute_calls_libvirt(self):
        # Record the tpool.execute() calls the driver must make: libvirt
        # calls have to be routed through eventlet's thread pool so they
        # cannot block the event loop.
        conn = fakelibvirt.virConnect()
        conn.is_expected = True

        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # Older libvirt python bindings may not expose this callback.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertTrue(c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
    def setUp(self):
        """Build the driver, admin context, domain XML fixtures and
        delete_info fixtures shared by the snapshot create/delete tests.
        """
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                     <source dev='/path/to/dev/1'/>
                     <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
        """

        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/root.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
        """

        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # delete_info fixtures: newest-snapshot rebase (1), older-snapshot
        # commit (2), rebase onto no backing file (3), the network-disk
        # commit case, and an unsupported type for error handling.
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}

        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}
    def tearDown(self):
        # No extra cleanup beyond the base class; kept for symmetry with
        # setUp().
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()
    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_and_instance')
    def test_volume_refresh_connection_info(self,
                                            mock_get_by_volume_and_instance,
                                            mock_refresh_connection_info):
        """_volume_refresh_connection_info looks up the BDM by volume and
        instance and refreshes its connection info exactly once.
        """
        instance = objects.Instance(**self.inst)
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm)
        mock_get_by_volume_and_instance.return_value = fake_bdm

        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        mock_get_by_volume_and_instance.assert_called_once_with(
            self.c, self.volume_uuid, instance.uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, instance,
            self.drvr._volume_api, self.drvr)
    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk.

        With quiesce=False, the first (quiesced) snapshot attempt fails
        and a retry without the QUIESCE flag is expected.
        """
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        # Only the target volume's disk gets an external snapshot; the
        # other disk (vdb) is excluded with snapshot="no".
        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q)
        else:
            # Quiesced attempt fails (no guest agent), then the driver
            # retries without the QUIESCE flag.
            domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                            'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, flags=snap_flags)

        self.mox.ReplayAll()

        guest = libvirt_guest.Guest(domain)
        self.drvr._volume_snapshot_create(self.c, instance, guest,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()
def test_volume_snapshot_create_libgfapi(self, quiesce=True):
"""Test snapshot creation with libgfapi network disk."""
self.flags(instance_name_template = 'instance-%s')
self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='gluster' name='gluster1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
instance = objects.Instance(**self.inst)
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="vdb" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = (snap_flags |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
if quiesce:
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q)
else:
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\
AndRaise(fakelibvirt.libvirtError(
'quiescing failed, no qemu-ga'))
domain.snapshotCreateXML(snap_xml_src, flags=snap_flags)
self.mox.ReplayAll()
guest = libvirt_guest.Guest(domain)
self.drvr._volume_snapshot_create(self.c, instance, guest,
self.volume_uuid, new_file)
self.mox.VerifyAll()
def test_volume_snapshot_create_noquiesce(self):
self.test_volume_snapshot_create(quiesce=False)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict(
{"properties": {
"hw_qemu_guest_agent": "yes"}})
self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce_bad_hyp(self, ver):
self.flags(virt_type='xxx', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict(
{"properties": {
"hw_qemu_guest_agent": "yes"}})
self.assertRaises(exception.InstanceQuiesceNotSupported,
self.drvr._can_quiesce, instance, image_meta)
@mock.patch.object(host.Host,
'has_min_version', return_value=False)
def test_can_quiesce_bad_ver(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = {"properties": {
"hw_qemu_guest_agent": "yes"}}
self.assertRaises(exception.InstanceQuiesceNotSupported,
self.drvr._can_quiesce, instance, image_meta)
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
def test_can_quiesce_agent_not_enable(self, ver):
self.flags(virt_type='kvm', group='libvirt')
instance = objects.Instance(**self.inst)
image_meta = objects.ImageMeta.from_dict({})
self.assertRaises(exception.QemuGuestAgentNotEnabled,
self.drvr._can_quiesce, instance, image_meta)
    def test_volume_snapshot_create_outer_success(self):
        """volume_snapshot_create delegates to _volume_snapshot_create,
        marks the cinder snapshot 'creating', waits for it to become
        available, then refreshes the volume connection info.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
        guest = libvirt_guest.Guest(domain)

        self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_guest(instance).AndReturn(guest)

        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          guest,
                                          self.volume_uuid,
                                          self.create_info['new_file'])

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')

        self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot')
        self.drvr._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)
    def test_volume_snapshot_create_outer_failure(self):
        """If the inner snapshot create fails, the cinder snapshot status
        is set to 'error' and the exception is propagated.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1)
        guest = libvirt_guest.Guest(domain)

        self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_guest(instance).AndReturn(guest)

        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          guest,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)
def test_volume_snapshot_delete_1(self):
"""Deleting newest snapshot -- blockRebase."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vda', 'snap.img', 0, flags=0)
domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
domain.blockJobInfo('vda', flags=0).AndReturn(
{'cur': 1000, 'end': 1000})
self.mox.ReplayAll()
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
    def test_volume_snapshot_delete_relative_1(self):
        """Deleting newest snapshot -- blockRebase using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_guest')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_guest(instance).AndReturn(guest)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # With the flag available the rebase must be requested RELATIVE.
        domain.blockRebase('vda', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        # Two polls: job in progress, then complete.
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml):
mock_domain = mock.Mock(spec=fakelibvirt.virDomain)
mock_domain.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(mock_domain)
exc = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, 'virDomainBlockRebase() failed',
error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)
mock_domain.blockRebase.side_effect = exc
return mock_domain, guest
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    @mock.patch('nova.virt.images.qemu_img_info',
                return_value=mock.Mock(file_format="fake_fmt"))
    @mock.patch('nova.utils.execute')
    def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute,
                                                         mock_qemu_img_info):
        """Deleting newest snapshot of a file-based image when the domain is
        not running should trigger a blockRebase using qemu-img not libvirt.
        In this test, we rebase the image with another image as backing file.
        """
        mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks(
            self.dom_xml)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=guest):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_1)

        # The backing file's format is probed so qemu-img can record it
        # explicitly via -F.
        mock_qemu_img_info.assert_called_once_with("snap.img")
        mock_execute.assert_called_once_with('qemu-img', 'rebase',
                                             '-b', 'snap.img', '-F',
                                             'fake_fmt', 'disk1_file')
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    @mock.patch('nova.virt.images.qemu_img_info',
                return_value=mock.Mock(file_format="fake_fmt"))
    @mock.patch('nova.utils.execute')
    def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base(
        self, mock_execute, mock_qemu_img_info):
        """Deleting newest snapshot of a file-based image when the domain is
        not running should trigger a blockRebase using qemu-img not libvirt.
        In this test, the image is rebased onto no backing file (i.e.
        it will exist independently of any backing file)
        """
        mock_domain, mock_guest = (
            self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml))

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

        # No backing file means there is no format to probe; '-b ""'
        # detaches the image from any backing chain.
        self.assertEqual(0, mock_qemu_img_info.call_count)
        mock_execute.assert_called_once_with('qemu-img', 'rebase',
                                             '-b', '', 'disk1_file')
    @mock.patch.object(host.Host, "has_min_version",
                       mock.Mock(return_value=True))
    @mock.patch("nova.virt.libvirt.guest.Guest.is_active",
                mock.Mock(return_value=False))
    def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self):
        """Deleting newest snapshot of a network disk when the domain is not
        running should raise a NovaException.
        """
        mock_domain, mock_guest = (
            self._setup_block_rebase_domain_and_guest_mocks(
                self.dom_netdisk_xml))
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            ex = self.assertRaises(exception.NovaException,
                                   self.drvr._volume_snapshot_delete,
                                   self.c, instance, self.volume_uuid,
                                   snapshot_id, self.delete_info_1)
            # qemu-img cannot rebase network-backed disks offline.
            self.assertIn('has not been fully tested', six.text_type(ex))
def test_volume_snapshot_delete_2(self):
"""Deleting older snapshot -- blockCommit."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.drvr._volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_2)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Older snapshot deletion commits snap.img down into
        # other-snap.img, using the RELATIVE flag.
        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        # Two polls: job in progress, then an empty dict meaning done.
        domain.blockJobInfo('vda', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn({})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)

        with test.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):

            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            # base=None: the image is rebased onto no backing file at all.
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vda', flags=0)
    def test_volume_snapshot_delete_netdisk_nonrelative_null_base(self):
        """Same as the non-netdisk variant, but for a network-attached disk
        (device 'vdb' from dom_netdisk_xml_2): newest/last snapshot is
        removed via blockRebase with a null base.
        """
        # Deleting newest and last snapshot of a network attached volume
        # with blockRebase. So base of the new image will be null.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeVirtDomain(fake_xml=self.dom_netdisk_xml_2)
        guest = libvirt_guest.Guest(domain)
        with test.nested(
            mock.patch.object(domain, 'XMLDesc',
                              return_value=self.dom_netdisk_xml_2),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(self.drvr._host, 'has_min_version',
                              return_value=True),
            mock.patch.object(domain, 'blockRebase'),
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={'cur': 1000, 'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_has_min_version,
              mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)
            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            mock_has_min_version.assert_called_once_with((1, 1, 1,))
            mock_rebase.assert_called_once_with('vdb', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vdb', flags=0)
    def test_volume_snapshot_delete_outer_success(self):
        """Happy path for the public volume_snapshot_delete wrapper: it
        delegates to _volume_snapshot_delete, marks the Cinder snapshot
        'deleting', and refreshes the volume connection info.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
        # mox record phase: the expected call sequence on the stubs.
        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)
        self.mox.ReplayAll()
        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)
        self.mox.VerifyAll()
    def test_volume_snapshot_delete_outer_failure(self):
        """When the inner delete raises, the wrapper sets the Cinder
        snapshot status to 'error_deleting' and re-raises the exception.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')
        # mox record phase: inner delete is programmed to fail.
        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)
        self.mox.VerifyAll()
    def test_volume_snapshot_delete_invalid_type(self):
        """An unrecognized delete_info type raises NovaException and flips
        the Cinder snapshot status to 'error_deleting'.
        """
        instance = objects.Instance(**self.inst)
        FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')
        self.mox.ReplayAll()
        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)
def test_volume_snapshot_delete_netdisk_1(self):
"""Delete newest snapshot -- blockRebase for libgfapi/network disk."""
class FakeNetdiskDomain(FakeVirtDomain):
def __init__(self, *args, **kwargs):
super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
def XMLDesc(self, flags):
return self.dom_netdisk_xml
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vdb', 'vdb[1]', 0, flags=0)
domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
domain.blockJobInfo('vdb', flags=0).AndReturn(
{'cur': 1000, 'end': 1000})
self.mox.ReplayAll()
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
    def test_volume_snapshot_delete_netdisk_relative_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""
        class FakeNetdiskDomain(FakeVirtDomain):
            # Domain whose XML always describes the network-attached disk.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
            def XMLDesc(self, flags):
                return self.dom_netdisk_xml
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        # Relative flag is present here, so rebase must carry it.
        domain.blockRebase('vdb', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)
        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)
        self.mox.VerifyAll()
    def test_volume_snapshot_delete_netdisk_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""
        class FakeNetdiskDomain(FakeVirtDomain):
            # Domain whose XML always describes the network-attached disk.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
            def XMLDesc(self, flags):
                return self.dom_netdisk_xml
        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        # Committing a network disk requires the relative flag; with it
        # missing the driver must refuse with exception.Invalid.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_netdisk)
        # Restore the constant removed above so later tests see it again.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
    def test_volume_snapshot_delete_netdisk_relative_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""
        class FakeNetdiskDomain(FakeVirtDomain):
            # Domain whose XML always describes the network-attached disk.
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
            def XMLDesc(self, flags):
                return self.dom_netdisk_xml
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'
        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')
        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
        # With the relative flag available, commit proceeds using the
        # backing-chain index notation for base and top.
        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
        domain.blockJobInfo('vdb', flags=0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', flags=0).AndReturn(
            {'cur': 1000, 'end': 1000})
        self.mox.ReplayAll()
        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)
        self.mox.VerifyAll()
def _fake_convert_image(source, dest, in_format, out_format,
                        run_as_root=True):
    """Stand-in for images.convert_image, used as a mock side_effect.

    Instead of invoking qemu-img it simply registers ``dest`` in the fake
    libvirt_utils in-memory filesystem so later code sees the file exists.
    """
    libvirt_driver.libvirt_utils.files[dest] = ''
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture and helpers for the libvirt snapshot test classes."""
    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()
        # Route all libvirt_utils usage through the in-memory fake.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        self.mock_update_task_state = mock.Mock()
        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)
    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        """Verify task-state transitions and the uploaded image metadata."""
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])
        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')
        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])
    def _create_image(self, extra_properties=None):
        """Register a placeholder image that snapshot() will update."""
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)
        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}
        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        """Run driver.snapshot against a fake domain and return the result."""
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot
    def _test_snapshot(self, disk_format, extra_properties=None):
        """End-to-end helper: create image, snapshot, assert the outcome."""
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot behavior for the various image backends (qcow2, raw, rbd...)."""
    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')
    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')
    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')
    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='ploop')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_ploop(self, mock_convert_image):
        self._test_snapshot(disk_format='ploop')
    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')
    def test_no_original_image(self):
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')
    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'
        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}
        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
        """rbd backend clones and flattens into the destination pool."""
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with mock.patch.object(fake_libvirt_utils, 'find_disk',
                               return_value=('rbd://some/fake/rbd/image',
                                             'raw')):
            with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
                self._test_snapshot(disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver):
        """If the rbd parent lookup fails, fall back to a plain conversion."""
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))
        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            self.assertFalse(rbd.clone.called)
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver):
        """A Forbidden clone falls back and cleans up the attempted snap."""
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        rbd.clone = mock.Mock(side_effect=exception.Forbidden(
            image_id='fake_id', reason='rbd testing'))
        with test.nested(
                mock.patch.object(libvirt_driver.imagebackend.images,
                                  'convert_image',
                                  side_effect=_fake_convert_image),
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
            self._test_snapshot(disk_format='raw')
            # Ensure that the direct_snapshot attempt was cleaned up
            rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False,
                                               pool='b', force=True)
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd,
                                                   mock_driver):
        """A failure after cloning still removes the created snap."""
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
        rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
        with test.nested(
                mock.patch.object(fake_libvirt_utils, 'find_disk',
                                  return_value=('rbd://some/fake/rbd/image',
                                                'raw')),
                mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'),
                mock.patch.object(self.image_service, 'update',
                                  side_effect=test.TestingException)):
            self.assertRaises(test.TestingException, self._test_snapshot,
                              disk_format='raw')
        rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
        rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
        # Ensure that the direct_snapshot attempt was cleaned up
        rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True,
                                           pool='b', force=True)
    @mock.patch.object(imagebackend.Image, 'direct_snapshot')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    def test_raw_with_rbd_clone_is_live_snapshot(self,
                                                 mock_get_guest,
                                                 mock_version,
                                                 mock_resolve,
                                                 mock_snapshot):
        """A successful direct rbd snapshot must not suspend the guest."""
        self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
        self.flags(images_type='rbd', group='libvirt')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()
        with mock.patch.object(driver, "suspend") as mock_suspend:
            driver.snapshot(self.context, self.instance_ref, recv_meta['id'],
                            self.mock_update_task_state)
            self.assertFalse(mock_suspend.called)
    @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(fake_libvirt_utils, 'find_disk')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(host.Host, 'get_guest')
    @mock.patch.object(rbd_utils, 'RBDDriver')
    @mock.patch.object(rbd_utils, 'rbd')
    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self,
                                                           mock_rbd,
                                                           mock_driver,
                                                           mock_get_guest,
                                                           mock_version,
                                                           mock_resolve,
                                                           mock_find_disk,
                                                           mock_convert):
        """When the direct rbd path fails, a cold snapshot (suspend) is taken."""
        self.flags(disable_libvirt_livesnapshot=False, group='workarounds')
        self.flags(images_type='rbd', group='libvirt')
        rbd = mock_driver.return_value
        rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
            image_id='fake_id', reason='rbd testing'))
        mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw')
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.get_power_state.return_value = power_state.RUNNING
        mock_guest._domain = mock.Mock()
        mock_get_guest.return_value = mock_guest
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        recv_meta = self._create_image()
        with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
            with mock.patch.object(driver, "suspend") as mock_suspend:
                driver.snapshot(self.context, self.instance_ref,
                                recv_meta['id'], self.mock_update_task_state)
                self.assertTrue(mock_suspend.called)
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Repeat all of the Libvirt snapshot tests, but with LXC enabled"""
    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        self.flags(virt_type='lxc', group='libvirt')
    def test_raw_with_rbd_clone_failure_does_cold_snapshot(self):
        # Overridden to skip: the cold-snapshot path relies on managedSave.
        self.skipTest("managedSave is not supported with LXC")
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the LVM image backend."""
    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        """Snapshot an LVM-backed disk and check the LV is converted."""
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')
        self._test_snapshot(disk_format=disk_format)
        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format,
            run_as_root=True)
    def test_raw(self):
        self._test_lvm_snapshot('raw')
    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
class NodeTest(Target):
    """Javascript tests run via a script specified in a package.json file."""
    def __init__(self, script_name=None, timeout=None, address=None, payload=None, **kwargs):
        """
        :param string script_name: The tests script name in package.json. Defaults to "test".
        :param int timeout: The test target timeout.
        :param address: Target address; forwarded unchanged to the Target base class.
        :param payload: Optional pre-built Payload; a fresh one is created when omitted.
        """
        payload = payload or Payload()
        # Record the configuration as payload fields so they participate
        # in target fingerprinting/invalidation.
        payload.add_fields(
            {
                "script_name": PrimitiveField(script_name or "test"),
                "timeout": PrimitiveField(timeout),
            }
        )
        super().__init__(address=address, payload=payload, **kwargs)
    @property
    def script_name(self):
        """The script name in package.json that runs the tests.
        :rtype: string
        """
        return self.payload.script_name
    @property
    def timeout(self):
        """The test target timeout.
        :rtype: int
        """
        return self.payload.timeout
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.statistics;
import javax.annotation.Nullable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
 * Interface exported by classes which support
 * aggregation of {@link IOStatistics}.
 * Implementations MAY aggregate all statistics
 * exported by the IOStatistics reference passed in to
 * {@link #aggregate(IOStatistics)}, or they
 * may selectively aggregate specific values/classes
 * of statistics.
 * Passing a null reference is always a no-op that returns false.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface IOStatisticsAggregator {
  /**
   * Aggregate the supplied statistics into the current
   * set.
   *
   * @param statistics statistics; may be null
   * @return true if the statistics reference was not null and
   * so aggregated.
   */
  boolean aggregate(@Nullable IOStatistics statistics);
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsAggregator.java
|
# -*- coding: utf-8 -*-
import struct
import os
import logging
class InvalidFileError(Exception):
    """Raised when the input is not a structurally valid PMX file
    (e.g. bad signature or unexpected header layout)."""
    pass
class UnsupportedVersionError(Exception):
    """Raised when the file's PMX version differs from the supported 2.0."""
    pass
class FileStream:
    """Base wrapper pairing an open PMX file object with its parsed header.

    Supports the context-manager protocol; exiting the ``with`` block
    closes the underlying file.
    """
    def __init__(self, path, file_obj, pmx_header):
        # Double-underscore names keep this state private to the base class.
        self.__path = path
        self.__file_obj = file_obj
        self.__header = pmx_header
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def path(self):
        """Return the filesystem path this stream was opened on."""
        return self.__path
    def header(self):
        """Return the attached PMX header; raise if none has been set."""
        if self.__header is None:
            raise Exception
        return self.__header
    def setHeader(self, pmx_header):
        """Attach the parsed PMX header to this stream."""
        self.__header = pmx_header
    def close(self):
        """Close the underlying file object; safe to call more than once."""
        if self.__file_obj is None:
            return
        logging.debug('close the file("%s")', self.__path)
        self.__file_obj.close()
        self.__file_obj = None
class FileReadStream(FileStream):
    """Little-endian binary reader for PMX files.

    The ``read*Index`` helpers resolve their byte width from the attached
    PMX header, so a header must be set before they are used.
    """
    # struct format codes keyed by index byte-width; built once instead of
    # re-creating the dict literal on every read call.
    _SIGNED_FORMATS = {1: "<b", 2: "<h", 4: "<i"}
    _UNSIGNED_FORMATS = {1: "<B", 2: "<H", 4: "<I"}
    def __init__(self, path, pmx_header=None):
        self.__fin = open(path, 'rb')
        FileStream.__init__(self, path, self.__fin, pmx_header)
    def __readIndex(self, size, typedict):
        """Read one index of `size` bytes using the matching struct code.

        :raises ValueError: if `size` is not 1, 2 or 4.
        """
        if size not in typedict:
            raise ValueError('invalid data size %s'%str(size))
        index, = struct.unpack(typedict[size], self.__fin.read(size))
        return index
    def __readSignedIndex(self, size):
        return self.__readIndex(size, self._SIGNED_FORMATS)
    def __readUnsignedIndex(self, size):
        return self.__readIndex(size, self._UNSIGNED_FORMATS)
    # READ methods for indexes
    def readVertexIndex(self):
        # Vertex indices are the only unsigned index type in PMX.
        return self.__readUnsignedIndex(self.header().vertex_index_size)
    def readBoneIndex(self):
        return self.__readSignedIndex(self.header().bone_index_size)
    def readTextureIndex(self):
        return self.__readSignedIndex(self.header().texture_index_size)
    def readMorphIndex(self):
        return self.__readSignedIndex(self.header().morph_index_size)
    def readRigidIndex(self):
        return self.__readSignedIndex(self.header().rigid_index_size)
    def readMaterialIndex(self):
        return self.__readSignedIndex(self.header().material_index_size)
    # READ / WRITE methods for general types
    def readInt(self):
        """Read a signed 32-bit little-endian integer."""
        v, = struct.unpack('<i', self.__fin.read(4))
        return v
    def readShort(self):
        """Read a signed 16-bit little-endian integer."""
        v, = struct.unpack('<h', self.__fin.read(2))
        return v
    def readUnsignedShort(self):
        """Read an unsigned 16-bit little-endian integer."""
        v, = struct.unpack('<H', self.__fin.read(2))
        return v
    def readStr(self):
        """Read a 32-bit-length-prefixed string decoded with the header charset."""
        length = self.readInt()
        buf, = struct.unpack('<%ds' % length, self.__fin.read(length))
        return str(buf, self.header().encoding.charset)
    def readFloat(self):
        """Read a 32-bit little-endian float."""
        v, = struct.unpack('<f', self.__fin.read(4))
        return v
    def readVector(self, size):
        """Read `size` consecutive 32-bit floats and return them as a list."""
        return list(struct.unpack('<%df' % size, self.__fin.read(4 * size)))
    def readByte(self):
        """Read one unsigned byte."""
        v, = struct.unpack('<B', self.__fin.read(1))
        return v
    def readBytes(self, length):
        """Read `length` raw bytes."""
        return self.__fin.read(length)
    def readSignedByte(self):
        """Read one signed byte."""
        v, = struct.unpack('<b', self.__fin.read(1))
        return v
class FileWriteStream(FileStream):
    """Little-endian binary writer for PMX files.

    The ``write*Index`` helpers resolve their byte width from the attached
    PMX header, so a header must be set before they are used.
    """
    # struct format codes keyed by index byte-width; built once instead of
    # re-creating the dict literal on every write call.
    _SIGNED_FORMATS = {1: "<b", 2: "<h", 4: "<i"}
    _UNSIGNED_FORMATS = {1: "<B", 2: "<H", 4: "<I"}
    def __init__(self, path, pmx_header=None):
        self.__fout = open(path, 'wb')
        FileStream.__init__(self, path, self.__fout, pmx_header)
    def __writeIndex(self, index, size, typedict):
        """Write one index of `size` bytes using the matching struct code.

        :raises ValueError: if `size` is not 1, 2 or 4.
        """
        if size not in typedict:
            raise ValueError('invalid data size %s'%str(size))
        self.__fout.write(struct.pack(typedict[size], int(index)))
        return
    def __writeSignedIndex(self, index, size):
        return self.__writeIndex(index, size, self._SIGNED_FORMATS)
    def __writeUnsignedIndex(self, index, size):
        return self.__writeIndex(index, size, self._UNSIGNED_FORMATS)
    # WRITE methods for indexes
    def writeVertexIndex(self, index):
        # Vertex indices are the only unsigned index type in PMX.
        return self.__writeUnsignedIndex(index, self.header().vertex_index_size)
    def writeBoneIndex(self, index):
        return self.__writeSignedIndex(index, self.header().bone_index_size)
    def writeTextureIndex(self, index):
        return self.__writeSignedIndex(index, self.header().texture_index_size)
    def writeMorphIndex(self, index):
        return self.__writeSignedIndex(index, self.header().morph_index_size)
    def writeRigidIndex(self, index):
        return self.__writeSignedIndex(index, self.header().rigid_index_size)
    def writeMaterialIndex(self, index):
        return self.__writeSignedIndex(index, self.header().material_index_size)
    def writeInt(self, v):
        """Write a signed 32-bit little-endian integer."""
        self.__fout.write(struct.pack('<i', int(v)))
    def writeShort(self, v):
        """Write a signed 16-bit little-endian integer."""
        self.__fout.write(struct.pack('<h', int(v)))
    def writeUnsignedShort(self, v):
        """Write an unsigned 16-bit little-endian integer."""
        self.__fout.write(struct.pack('<H', int(v)))
    def writeStr(self, v):
        """Write a string as header-charset bytes prefixed by their length."""
        data = v.encode(self.header().encoding.charset)
        self.writeInt(len(data))
        self.__fout.write(data)
    def writeFloat(self, v):
        """Write a 32-bit little-endian float."""
        self.__fout.write(struct.pack('<f', float(v)))
    def writeVector(self, v):
        """Write every element of `v` as a 32-bit little-endian float."""
        self.__fout.write(struct.pack('<%df' % len(v), *v))
    def writeByte(self, v):
        """Write one unsigned byte."""
        self.__fout.write(struct.pack('<B', int(v)))
    def writeBytes(self, v):
        """Write raw bytes unchanged."""
        self.__fout.write(v)
    def writeSignedByte(self, v):
        """Write one signed byte."""
        self.__fout.write(struct.pack('<b', int(v)))
class Encoding:
    """Maps between a PMX header encoding index and a Python codec name.

    Construct from either the codec name (str) or the header index (int);
    the resolved pair is exposed as ``index`` and ``charset``.
    """
    # (header index, Python codec name) pairs recognized by PMX.
    _MAP = [
        (0, 'utf-16-le'),
        (1, 'utf-8'),
    ]
    def __init__(self, arg):
        self.index = 0
        self.charset = ''
        if isinstance(arg, str):
            matches = [pair for pair in self._MAP if pair[1] == arg]
            if len(matches) == 0:
                raise ValueError('invalid charset %s'%arg)
        elif isinstance(arg, int):
            matches = [pair for pair in self._MAP if pair[0] == arg]
            if len(matches) == 0:
                raise ValueError('invalid index %d'%arg)
        else:
            raise ValueError('invalid argument type')
        self.index, self.charset = matches[0]
    def __repr__(self):
        return '<Encoding charset %s>'%self.charset
class Coordinate:
    """Pair of axis vectors (X and Z) describing an orientation.

    Values are stored as given; no validation or normalization is done.
    """
    def __init__(self, xAxis, zAxis):
        self.x_axis = xAxis
        self.z_axis = zAxis
class Header:
    """PMX file header: signature, version, string encoding, and the byte
    widths used for every index type in the rest of the file."""
    # Fixed 4-byte magic at the start of every PMX file.
    PMX_SIGN = b'PMX '
    # Only PMX 2.0 is supported by load().
    VERSION = 2.0
    def __init__(self, model=None):
        self.sign = self.PMX_SIGN
        self.version = 0
        self.encoding = Encoding('utf-16-le')
        self.additional_uvs = 0
        # All index widths default to 1 byte; updateIndexSizes() widens
        # them based on the element counts of an actual model.
        self.vertex_index_size = 1
        self.texture_index_size = 1
        self.material_index_size = 1
        self.bone_index_size = 1
        self.morph_index_size = 1
        self.rigid_index_size = 1
        if model is not None:
            self.updateIndexSizes(model)
    def updateIndexSizes(self, model):
        """Recompute each index width from the model's element counts.

        Vertex indices are unsigned; all other index types are signed
        (they must be able to encode -1).
        """
        self.vertex_index_size = self.__getIndexSize(len(model.vertices), False)
        self.texture_index_size = self.__getIndexSize(len(model.textures), True)
        self.material_index_size = self.__getIndexSize(len(model.materials), True)
        self.bone_index_size = self.__getIndexSize(len(model.bones), True)
        self.morph_index_size = self.__getIndexSize(len(model.morphs), True)
        self.rigid_index_size = self.__getIndexSize(len(model.rigids), True)
    @staticmethod
    def __getIndexSize(num, signed):
        """Return the smallest byte width (1, 2 or 4) able to hold `num`
        indices; a signed type halves the usable positive range."""
        s = 1
        if signed:
            s = 2
        if (1<<8)/s > num:
            return 1
        elif (1<<16)/s > num:
            return 2
        else:
            return 4
    def load(self, fs):
        """Read and validate the header from a FileReadStream.

        :raises InvalidFileError: on a bad signature or unexpected layout.
        :raises UnsupportedVersionError: if the version is not 2.0.
        """
        logging.info('loading pmx header information...')
        self.sign = fs.readBytes(4)
        logging.debug('File signature is %s', self.sign)
        if self.sign != self.PMX_SIGN:
            logging.info('File signature is invalid')
            logging.error('This file is unsupported format, or corrupt file.')
            raise InvalidFileError('File signature is invalid.')
        self.version = fs.readFloat()
        logging.info('pmx format version: %f', self.version)
        if self.version != self.VERSION:
            logging.error('PMX version %.1f is unsupported', self.version)
            raise UnsupportedVersionError('unsupported PMX version: %.1f'%self.version)
        # PMX 2.0 always carries exactly 8 one-byte header flags.
        if fs.readByte() != 8:
            raise InvalidFileError
        self.encoding = Encoding(fs.readByte())
        self.additional_uvs = fs.readByte()
        self.vertex_index_size = fs.readByte()
        self.texture_index_size = fs.readByte()
        self.material_index_size = fs.readByte()
        self.bone_index_size = fs.readByte()
        self.morph_index_size = fs.readByte()
        self.rigid_index_size = fs.readByte()
        logging.info('----------------------------')
        logging.info('pmx header information')
        logging.info('----------------------------')
        logging.info('pmx version: %.1f', self.version)
        logging.info('encoding: %s', str(self.encoding))
        logging.info('number of uvs: %d', self.additional_uvs)
        logging.info('vertex index size: %d byte(s)', self.vertex_index_size)
        logging.info('texture index: %d byte(s)', self.texture_index_size)
        logging.info('material index: %d byte(s)', self.material_index_size)
        logging.info('bone index: %d byte(s)', self.bone_index_size)
        logging.info('morph index: %d byte(s)', self.morph_index_size)
        logging.info('rigid index: %d byte(s)', self.rigid_index_size)
        logging.info('----------------------------')
    def save(self, fs):
        """Write the header to a FileWriteStream (always as PMX 2.0)."""
        fs.writeBytes(self.PMX_SIGN)
        fs.writeFloat(self.VERSION)
        fs.writeByte(8)
        fs.writeByte(self.encoding.index)
        fs.writeByte(self.additional_uvs)
        fs.writeByte(self.vertex_index_size)
        fs.writeByte(self.texture_index_size)
        fs.writeByte(self.material_index_size)
        fs.writeByte(self.bone_index_size)
        fs.writeByte(self.morph_index_size)
        fs.writeByte(self.rigid_index_size)
    def __repr__(self):
        return '<Header encoding %s, uvs %d, vtx %d, tex %d, mat %d, bone %d, morph %d, rigid %d>'%(
            str(self.encoding),
            self.additional_uvs,
            self.vertex_index_size,
            self.texture_index_size,
            self.material_index_size,
            self.bone_index_size,
            self.morph_index_size,
            self.rigid_index_size,
            )
class Model:
    def __init__(self):
        """Create an empty model pre-populated with the two mandatory
        special display frames."""
        self.header = None
        self.name = ''
        self.name_e = ''
        self.comment = ''
        self.comment_e = ''
        self.vertices = []
        self.faces = []
        self.textures = []
        self.materials = []
        self.bones = []
        self.morphs = []
        self.display = []
        # First special frame: the bone root.
        dsp_root = Display()
        dsp_root.isSpecial = True
        dsp_root.name = 'Root'
        dsp_root.name_e = 'Root'
        self.display.append(dsp_root)
        # Second special frame; '表情' is Japanese for "facial expressions".
        dsp_face = Display()
        dsp_face.isSpecial = True
        dsp_face.name = '表情'
        dsp_face.name_e = ''
        self.display.append(dsp_face)
        self.rigids = []
        self.joints = []
def load(self, fs):
self.name = fs.readStr()
self.name_e = fs.readStr()
self.comment = fs.readStr()
self.comment_e = fs.readStr()
logging.info('Model name: %s', self.name)
logging.info('Model name(english): %s', self.name_e)
logging.info('Comment:%s', self.comment)
logging.info('Comment(english):%s', self.comment_e)
logging.info('')
logging.info('------------------------------')
logging.info('Load Vertices')
logging.info('------------------------------')
num_vertices = fs.readInt()
self.vertices = []
for i in range(num_vertices):
v = Vertex()
v.load(fs)
self.vertices.append(v)
logging.info('----- Loaded %d vertices', len(self.vertices))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Faces')
logging.info('------------------------------')
num_faces = fs.readInt()
self.faces = []
for i in range(int(num_faces/3)):
f1 = fs.readVertexIndex()
f2 = fs.readVertexIndex()
f3 = fs.readVertexIndex()
self.faces.append((f3, f2, f1))
logging.info(' Load %d faces', len(self.faces))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Textures')
logging.info('------------------------------')
num_textures = fs.readInt()
self.textures = []
for i in range(num_textures):
t = Texture()
t.load(fs)
self.textures.append(t)
logging.info('Texture %d: %s', i, t.path)
logging.info(' ----- Loaded %d textures', len(self.textures))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Materials')
logging.info('------------------------------')
num_materials = fs.readInt()
self.materials = []
for i in range(num_materials):
m = Material()
m.load(fs)
self.materials.append(m)
logging.info('Material %d: %s', i, m.name)
logging.debug(' Name(english): %s', m.name_e)
logging.debug(' Comment: %s', m.comment)
logging.debug(' Vertex Count: %d', m.vertex_count)
logging.debug(' Diffuse: (%.2f, %.2f, %.2f, %.2f)', *m.diffuse)
logging.debug(' Specular: (%.2f, %.2f, %.2f, %.2f)', *m.specular)
logging.debug(' Ambient: (%.2f, %.2f, %.2f)', *m.ambient)
logging.debug(' Double Sided: %s', str(m.is_double_sided))
logging.debug(' Drop Shadow: %s', str(m.enabled_drop_shadow))
logging.debug(' Self Shadow: %s', str(m.enabled_self_shadow))
logging.debug(' Self Shadow Map: %s', str(m.enabled_self_shadow_map))
logging.debug(' Edge: %s', str(m.enabled_toon_edge))
logging.debug(' Edge Color: (%.2f, %.2f, %.2f, %.2f)', *m.edge_color)
logging.debug(' Edge Size: %.2f', m.edge_size)
if m.texture != -1:
logging.debug(' Texture Index: %d', m.texture)
else:
logging.debug(' Texture: None')
if m.sphere_texture != -1:
logging.debug(' Sphere Texture Index: %d', m.sphere_texture)
logging.debug(' Sphere Texture Mode: %d', m.sphere_texture_mode)
else:
logging.debug(' Sphere Texture: None')
logging.debug('')
logging.info('----- Loaded %d materials.', len(self.materials))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Bones')
logging.info('------------------------------')
num_bones = fs.readInt()
self.bones = []
for i in range(num_bones):
b = Bone()
b.load(fs)
self.bones.append(b)
logging.info('Bone %d: %s', i, b.name)
logging.debug(' Name(english): %s', b.name_e)
logging.debug(' Location: (%f, %f, %f)', *b.location)
logging.debug(' Parent: %s', str(b.parent))
logging.debug(' Transform Order: %s', str(b.transform_order))
logging.debug(' Rotatable: %s', str(b.isRotatable))
logging.debug(' Movable: %s', str(b.isMovable))
logging.debug(' Visible: %s', str(b.visible))
logging.debug(' Controllable: %s', str(b.isControllable))
logging.debug(' Edge: %s', str(m.enabled_toon_edge))
logging.debug(' Additional Location: %s', str(b.hasAdditionalRotate))
logging.debug(' Additional Rotation: %s', str(b.hasAdditionalRotate))
if b.additionalTransform is not None:
logging.debug(' Additional Transform: Bone:%d, influence: %f', *b.additionalTransform)
logging.debug(' IK: %s', str(b.isIK))
if b.isIK:
for j, link in enumerate(b.ik_links):
if isinstance(link.minimumAngle, list) and len(link.minimumAngle) == 3:
min_str = '(%f, %f, %f)'%tuple(link.minimumAngle)
else:
min_str = '(None, None, None)'
if isinstance(link.maximumAngle, list) and len(link.maximumAngle) == 3:
max_str = '(%f, %f, %f)'%tuple(link.maximumAngle)
else:
max_str = '(None, None, None)'
logging.debug(' IK Link %d: %d, %s - %s', j, link.target, min_str, max_str)
logging.debug('')
logging.info('----- Loaded %d bones.', len(self.bones))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Morphs')
logging.info('------------------------------')
num_morph = fs.readInt()
self.morphs = []
display_categories = {0: 'System', 1: 'Eyebrow', 2: 'Eye', 3: 'Mouth', 4: 'Other'}
for i in range(num_morph):
m = Morph.create(fs)
self.morphs.append(m)
logging.info('%s %d: %s', m.__class__.__name__, i, m.name)
logging.debug(' Name(english): %s', m.name_e)
logging.debug(' Category: %s', display_categories[m.category])
logging.debug('')
logging.info('----- Loaded %d morphs.', len(self.morphs))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Display Items')
logging.info('------------------------------')
num_disp = fs.readInt()
self.display = []
for i in range(num_disp):
d = Display()
d.load(fs)
self.display.append(d)
logging.info('Display Item %d: %s', i, d.name)
logging.debug(' Name(english): %s', d.name_e)
logging.debug('')
logging.info('----- Loaded %d display items.', len(self.display))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Rigid Bodies')
logging.info('------------------------------')
num_rigid = fs.readInt()
self.rigids = []
rigid_types = {0: 'Sphere', 1: 'Box', 2: 'Capsule'}
rigid_modes = {0: 'Static', 1: 'Dynamic', 2: 'Dynamic(track to bone)'}
for i in range(num_rigid):
r = Rigid()
r.load(fs)
self.rigids.append(r)
logging.info('Rigid Body %d: %s', i, r.name)
logging.debug(' Name(english): %s', r.name_e)
logging.debug(' Type: %s', rigid_types[r.type])
logging.debug(' Mode: %s', rigid_modes[r.mode])
if r.bone is not None:
logging.debug(' Related bone: %s (index: %d)', self.bones[r.bone].name, r.bone)
logging.debug(' Collision group: %d', r.collision_group_number)
logging.debug(' Collision group mask: 0x%x', r.collision_group_mask)
logging.debug(' Size: (%f, %f, %f)', *r.size)
logging.debug(' Location: (%f, %f, %f)', *r.location)
logging.debug(' Rotation: (%f, %f, %f)', *r.rotation)
logging.debug(' Mass: %f', r.mass)
logging.debug(' Bounce: %f', r.bounce)
logging.debug(' Friction: %f', r.friction)
logging.debug('')
logging.info('----- Loaded %d rigid bodies.', len(self.rigids))
logging.info('')
logging.info('------------------------------')
logging.info(' Load Joints')
logging.info('------------------------------')
num_joints = fs.readInt()
self.joints = []
for i in range(num_joints):
j = Joint()
j.load(fs)
self.joints.append(j)
logging.info('Joint %d: %s', i, j.name)
logging.debug(' Name(english): %s', j.name_e)
logging.debug(' Rigid A: %s (index: %d)', self.rigids[j.src_rigid].name, j.src_rigid)
logging.debug(' Rigid B: %s (index: %d)', self.rigids[j.dest_rigid].name, j.dest_rigid)
logging.debug(' Location: (%f, %f, %f)', *j.location)
logging.debug(' Rotation: (%f, %f, %f)', *j.rotation)
logging.debug(' Location Limit: (%f, %f, %f) - (%f, %f, %f)', *(j.minimum_location + j.maximum_location))
logging.debug(' Rotation Limit: (%f, %f, %f) - (%f, %f, %f)', *(j.minimum_rotation + j.maximum_rotation))
logging.debug(' Spring: (%f, %f, %f)', *j.spring_constant)
logging.debug(' Spring(rotation): (%f, %f, %f)', *j.spring_rotation_constant)
logging.debug('')
logging.info('----- Loaded %d joints.', len(self.joints))
def save(self, fs):
fs.writeStr(self.name)
fs.writeStr(self.name_e)
fs.writeStr(self.comment)
fs.writeStr(self.comment_e)
logging.info('''exportings pmx model data...
name: %s
name(english): %s
comment:
%s
comment(english):
%s
''', self.name, self.name_e, self.comment, self.comment_e)
logging.info('exporting vertices...')
fs.writeInt(len(self.vertices))
for i in self.vertices:
i.save(fs)
logging.info('the number of vetices: %d', len(self.vertices))
logging.info('finished exporting vertices.')
logging.info('exporting faces...')
fs.writeInt(len(self.faces)*3)
for f3, f2, f1 in self.faces:
fs.writeVertexIndex(f1)
fs.writeVertexIndex(f2)
fs.writeVertexIndex(f3)
logging.info('the number of faces: %d', len(self.faces))
logging.info('finished exporting faces.')
logging.info('exporting textures...')
fs.writeInt(len(self.textures))
for i in self.textures:
i.save(fs)
logging.info('the number of textures: %d', len(self.textures))
logging.info('finished exporting textures.')
logging.info('exporting materials...')
fs.writeInt(len(self.materials))
for i in self.materials:
i.save(fs)
logging.info('the number of materials: %d', len(self.materials))
logging.info('finished exporting materials.')
logging.info('exporting bones...')
fs.writeInt(len(self.bones))
for i in self.bones:
i.save(fs)
logging.info('the number of bones: %d', len(self.bones))
logging.info('finished exporting bones.')
logging.info('exporting morphs...')
fs.writeInt(len(self.morphs))
for i in self.morphs:
i.save(fs)
logging.info('the number of morphs: %d', len(self.morphs))
logging.info('finished exporting morphs.')
logging.info('exporting display items...')
fs.writeInt(len(self.display))
for i in self.display:
i.save(fs)
logging.info('the number of display items: %d', len(self.display))
logging.info('finished exporting display items.')
logging.info('exporting rigid bodies...')
fs.writeInt(len(self.rigids))
for i in self.rigids:
logging.debug(' Rigid: %s', i.name)
i.save(fs)
logging.info('the number of rigid bodies: %d', len(self.rigids))
logging.info('finished exporting rigid bodies.')
logging.info('exporting joints...')
fs.writeInt(len(self.joints))
for i in self.joints:
i.save(fs)
logging.info('the number of joints: %d', len(self.joints))
logging.info('finished exporting joints.')
logging.info('finished exporting the model.')
def __repr__(self):
return '<Model name %s, name_e %s, comment %s, comment_e %s, textures %s>'%(
self.name,
self.name_e,
self.comment,
self.comment_e,
str(self.textures),
)
class Vertex:
    """A single PMX vertex: position, normal, UV, optional extra UV sets,
    a bone-weight record and an outline (edge) scale."""

    def __init__(self):
        self.co = [0.0, 0.0, 0.0]
        self.normal = [0.0, 0.0, 0.0]
        self.uv = [0.0, 0.0]
        self.additional_uvs = []
        self.weight = None       # BoneWeight, filled by load()
        self.edge_scale = 1

    def __repr__(self):
        parts = (self.co, self.normal, self.uv, self.additional_uvs,
                 self.weight, self.edge_scale)
        return ('<Vertex co %s, normal %s, uv %s, additional_uvs %s, '
                'weight %s, edge_scale %s>') % tuple(str(p) for p in parts)

    def load(self, fs):
        """Fill this vertex from the read stream *fs*; the number of extra
        UV sets comes from the stream's header."""
        self.co = fs.readVector(3)
        self.normal = fs.readVector(3)
        self.uv = fs.readVector(2)
        self.additional_uvs = [fs.readVector(4)
                               for _ in range(fs.header().additional_uvs)]
        self.weight = BoneWeight()
        self.weight.load(fs)
        self.edge_scale = fs.readFloat()

    def save(self, fs):
        """Write this vertex to the write stream *fs*."""
        fs.writeVector(self.co)
        fs.writeVector(self.normal)
        fs.writeVector(self.uv)
        for extra_uv in self.additional_uvs:
            fs.writeVector(extra_uv)
        self.weight.save(fs)
        fs.writeFloat(self.edge_scale)
class BoneWeightSDEF:
    """Spherical-deform (SDEF) weight payload: the blend weight plus the
    C, R0 and R1 control points (each a 3-vector once loaded)."""

    def __init__(self, weight=0, c=None, r0=None, r1=None):
        self.weight = weight   # blend weight toward the first bone
        self.c = c             # center point
        self.r0 = r0           # first range point
        self.r1 = r1           # second range point
class BoneWeight:
    """Per-vertex skinning data in one of the four PMX schemes
    (BDEF1/BDEF2/BDEF4/SDEF)."""
    BDEF1 = 0
    BDEF2 = 1
    BDEF4 = 2
    SDEF = 3
    TYPES = [
        (BDEF1, 'BDEF1'),
        (BDEF2, 'BDEF2'),
        (BDEF4, 'BDEF4'),
        (SDEF, 'SDEF'),
        ]

    def __init__(self):
        self.bones = []
        self.weights = []      # list of floats, or BoneWeightSDEF for SDEF
        self.type = self.BDEF1

    def convertIdToName(self, type_id):
        """Return the scheme name for *type_id*, or None if unknown."""
        for value, label in self.TYPES:
            if value == type_id:
                return label
        return None

    def convertNameToId(self, type_name):
        """Return the scheme id for *type_name*, or None if unknown."""
        for value, label in self.TYPES:
            if label == type_name:
                return value
        return None

    def load(self, fs):
        """Read the scheme byte then the scheme-specific payload from *fs*;
        raises ValueError on an unknown scheme."""
        self.type = fs.readByte()
        self.bones = []
        self.weights = []
        if self.type == self.BDEF1:
            self.bones.append(fs.readBoneIndex())
        elif self.type == self.BDEF2:
            for _ in range(2):
                self.bones.append(fs.readBoneIndex())
            self.weights.append(fs.readFloat())
        elif self.type == self.BDEF4:
            for _ in range(4):
                self.bones.append(fs.readBoneIndex())
            self.weights = fs.readVector(4)
        elif self.type == self.SDEF:
            for _ in range(2):
                self.bones.append(fs.readBoneIndex())
            sdef = BoneWeightSDEF()
            sdef.weight = fs.readFloat()
            sdef.c = fs.readVector(3)
            sdef.r0 = fs.readVector(3)
            sdef.r1 = fs.readVector(3)
            self.weights = sdef
        else:
            raise ValueError('invalid weight type %s'%str(self.type))

    def save(self, fs):
        """Write the scheme byte and payload to *fs*; raises ValueError on
        an unknown scheme or malformed SDEF payload."""
        fs.writeByte(self.type)
        if self.type == self.BDEF1:
            fs.writeBoneIndex(self.bones[0])
        elif self.type == self.BDEF2:
            for idx in range(2):
                fs.writeBoneIndex(self.bones[idx])
            fs.writeFloat(self.weights[0])
        elif self.type == self.BDEF4:
            for idx in range(4):
                fs.writeBoneIndex(self.bones[idx])
            for idx in range(4):
                fs.writeFloat(self.weights[idx])
        elif self.type == self.SDEF:
            for idx in range(2):
                fs.writeBoneIndex(self.bones[idx])
            if not isinstance(self.weights, BoneWeightSDEF):
                raise ValueError
            fs.writeFloat(self.weights.weight)
            fs.writeVector(self.weights.c)
            fs.writeVector(self.weights.r0)
            fs.writeVector(self.weights.r1)
        else:
            raise ValueError('invalid weight type %s'%str(self.type))
class Texture:
    """A texture reference; the path is made absolute on load and written
    back relative to the pmx file on save."""

    def __init__(self):
        self.path = ''

    def __repr__(self):
        return '<Texture path %s>'%str(self.path)

    def load(self, fs):
        """Read the path and, if relative, resolve it against the directory
        of the pmx file behind *fs*."""
        self.path = fs.readStr()
        if not os.path.isabs(self.path):
            base_dir = os.path.dirname(fs.path())
            self.path = os.path.normpath(os.path.join(base_dir, self.path))

    def save(self, fs):
        """Write the path relative to the pmx file being written."""
        fs.writeStr(os.path.relpath(self.path, os.path.dirname(fs.path())))
class SharedTexture(Texture):
    """Reference to one of MMD's shared (built-in) toon textures.

    Bug fix: the previous __init__ never initialized the base class, so
    instances lacked ``path`` and the inherited __repr__/save crashed with
    AttributeError; Texture.__init__ is now called first.
    """
    def __init__(self):
        Texture.__init__(self)
        self.number = 0    # shared-texture slot number
        self.prefix = ''   # filename prefix of the shared texture
class Material:
    """A PMX material: colors, render flags, edge settings and texture /
    sphere / toon texture references.

    Bug fix: save() previously wrote ``self.name`` twice and never wrote
    ``self.name_e``, so the english name field was corrupted on export.
    """
    SPHERE_MODE_OFF = 0
    SPHERE_MODE_MULT = 1
    SPHERE_MODE_ADD = 2
    SPHERE_MODE_SUBTEX = 3
    def __init__(self):
        self.name = ''
        self.name_e = ''
        self.diffuse = []
        self.specular = []
        self.ambient = []
        self.is_double_sided = False
        self.enabled_drop_shadow = False
        self.enabled_self_shadow_map = False
        self.enabled_self_shadow = False
        self.enabled_toon_edge = False
        self.edge_color = []
        self.edge_size = 1
        self.texture = -1          # -1 means "no texture"
        self.sphere_texture = -1
        self.sphere_texture_mode = 0
        self.is_shared_toon_texture = True
        self.toon_texture = 0
        self.comment = ''
        self.vertex_count = 0      # number of face-vertex indices using this material
    def __repr__(self):
        return '<Material name %s, name_e %s, diffuse %s, specular %s, ambient %s, double_side %s, drop_shadow %s, self_shadow_map %s, self_shadow %s, toon_edge %s, edge_color %s, edge_size %s, toon_texture %s, comment %s>'%(
            self.name,
            self.name_e,
            str(self.diffuse),
            str(self.specular),
            str(self.ambient),
            str(self.is_double_sided),
            str(self.enabled_drop_shadow),
            str(self.enabled_self_shadow_map),
            str(self.enabled_self_shadow),
            str(self.enabled_toon_edge),
            str(self.edge_color),
            str(self.edge_size),
            str(self.texture),
            str(self.sphere_texture),
            str(self.toon_texture),
            str(self.comment),)
    def load(self, fs):
        """Read one material record from *fs*; the flag byte is unpacked
        into the individual boolean attributes."""
        self.name = fs.readStr()
        self.name_e = fs.readStr()
        self.diffuse = fs.readVector(4)
        self.specular = fs.readVector(4)
        self.ambient = fs.readVector(3)
        flags = fs.readByte()
        self.is_double_sided = bool(flags & 1)
        self.enabled_drop_shadow = bool(flags & 2)
        self.enabled_self_shadow_map = bool(flags & 4)
        self.enabled_self_shadow = bool(flags & 8)
        self.enabled_toon_edge = bool(flags & 16)
        self.edge_color = fs.readVector(4)
        self.edge_size = fs.readFloat()
        self.texture = fs.readTextureIndex()
        self.sphere_texture = fs.readTextureIndex()
        self.sphere_texture_mode = fs.readSignedByte()
        self.is_shared_toon_texture = fs.readSignedByte()
        self.is_shared_toon_texture = (self.is_shared_toon_texture == 1)
        # A shared toon texture is a one-byte slot number; otherwise it is
        # a regular texture index.
        if self.is_shared_toon_texture:
            self.toon_texture = fs.readSignedByte()
        else:
            self.toon_texture = fs.readTextureIndex()
        self.comment = fs.readStr()
        self.vertex_count = fs.readInt()
    def save(self, fs):
        """Write one material record to *fs* (inverse of load())."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)  # was self.name: english name was lost
        fs.writeVector(self.diffuse)
        fs.writeVector(self.specular)
        fs.writeVector(self.ambient)
        flags = 0
        flags |= int(self.is_double_sided)
        flags |= int(self.enabled_drop_shadow) << 1
        flags |= int(self.enabled_self_shadow_map) << 2
        flags |= int(self.enabled_self_shadow) << 3
        flags |= int(self.enabled_toon_edge) << 4
        fs.writeByte(flags)
        fs.writeVector(self.edge_color)
        fs.writeFloat(self.edge_size)
        fs.writeTextureIndex(self.texture)
        fs.writeTextureIndex(self.sphere_texture)
        fs.writeSignedByte(self.sphere_texture_mode)
        if self.is_shared_toon_texture:
            fs.writeSignedByte(1)
            fs.writeSignedByte(self.toon_texture)
        else:
            fs.writeSignedByte(0)
            fs.writeTextureIndex(self.toon_texture)
        fs.writeStr(self.comment)
        fs.writeInt(self.vertex_count)
class Bone:
    """A PMX bone, including its flag-controlled optional payloads
    (display connection, inherited transform, fixed axis, local axes,
    external parent, IK chain).

    Bug fix: save() never wrote flag bit 0x1000 although load() reads it
    into ``transAfterPhis``, so the post-physics-transform flag was lost
    on every export round trip.
    """
    def __init__(self):
        self.name = ''
        self.name_e = ''
        self.location = []
        self.parent = None
        self.transform_order = 0
        # Display connection: either a coordinate offset (float3) or a
        # bone index (int); which one is encoded in flag bit 0.
        self.displayConnection = -1
        self.isRotatable = True
        self.isMovable = True
        self.visible = True
        self.isControllable = True
        self.isIK = False
        # Inherit rotation from another bone.
        self.hasAdditionalRotate = False
        # Inherit translation from another bone.
        self.hasAdditionalLocation = False
        # (source bone index, influence) for the inherited rotation/translation.
        self.additionalTransform = None
        # Fixed axis (axis vector, float3), or None.
        self.axis = None
        # Local coordinate axes (Coordinate), or None.
        self.localCoordinate = None
        # Transform after physics simulation (flag bit 0x1000).
        self.transAfterPhis = False
        # External parent transform key, or None.
        self.externalTransKey = None
        # Fields below are only meaningful for IK bones.
        self.target = None
        self.loopCount = 8
        # Per-iteration angle limit (radians) during IK solving.
        self.rotationConstraint = 0.03
        # List of IKLink objects.
        self.ik_links = []
    def __repr__(self):
        return '<Bone name %s, name_e %s>'%(
            self.name,
            self.name_e,)
    def load(self, fs):
        """Read one bone record from *fs*, decoding the 16-bit flag word
        and each flag-gated optional payload in file order."""
        self.name = fs.readStr()
        self.name_e = fs.readStr()
        self.location = fs.readVector(3)
        self.parent = fs.readBoneIndex()
        self.transform_order = fs.readInt()
        flags = fs.readShort()
        if flags & 0x0001:
            self.displayConnection = fs.readBoneIndex()
        else:
            self.displayConnection = fs.readVector(3)
        self.isRotatable = ((flags & 0x0002) != 0)
        self.isMovable = ((flags & 0x0004) != 0)
        self.visible = ((flags & 0x0008) != 0)
        self.isControllable = ((flags & 0x0010) != 0)
        self.isIK = ((flags & 0x0020) != 0)
        self.hasAdditionalRotate = ((flags & 0x0100) != 0)
        self.hasAdditionalLocation = ((flags & 0x0200) != 0)
        if self.hasAdditionalRotate or self.hasAdditionalLocation:
            t = fs.readBoneIndex()
            v = fs.readFloat()
            self.additionalTransform = (t, v)
        else:
            self.additionalTransform = None
        if flags & 0x0400:
            self.axis = fs.readVector(3)
        else:
            self.axis = None
        if flags & 0x0800:
            xaxis = fs.readVector(3)
            zaxis = fs.readVector(3)
            self.localCoordinate = Coordinate(xaxis, zaxis)
        else:
            self.localCoordinate = None
        self.transAfterPhis = ((flags & 0x1000) != 0)
        if flags & 0x2000:
            self.externalTransKey = fs.readInt()
        else:
            self.externalTransKey = None
        if self.isIK:
            self.target = fs.readBoneIndex()
            self.loopCount = fs.readInt()
            self.rotationConstraint = fs.readFloat()
            iklink_num = fs.readInt()
            self.ik_links = []
            for i in range(iklink_num):
                link = IKLink()
                link.load(fs)
                self.ik_links.append(link)
    def save(self, fs):
        """Write one bone record to *fs*, rebuilding the flag word from the
        attribute state (inverse of load())."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)
        fs.writeVector(self.location)
        fs.writeBoneIndex(-1 if self.parent is None else self.parent)
        fs.writeInt(self.transform_order)
        flags = 0
        flags |= int(isinstance(self.displayConnection, int))
        flags |= int(self.isRotatable) << 1
        flags |= int(self.isMovable) << 2
        flags |= int(self.visible) << 3
        flags |= int(self.isControllable) << 4
        flags |= int(self.isIK) << 5
        flags |= int(self.hasAdditionalRotate) << 8
        flags |= int(self.hasAdditionalLocation) << 9
        flags |= int(self.axis is not None) << 10
        flags |= int(self.localCoordinate is not None) << 11
        # Was missing: bit 0x1000 (transform-after-physics) is read by
        # load() but was never written back.
        flags |= int(self.transAfterPhis) << 12
        flags |= int(self.externalTransKey is not None) << 13
        fs.writeShort(flags)
        if flags & 0x0001:
            fs.writeBoneIndex(self.displayConnection)
        else:
            fs.writeVector(self.displayConnection)
        if self.hasAdditionalRotate or self.hasAdditionalLocation:
            fs.writeBoneIndex(self.additionalTransform[0])
            fs.writeFloat(self.additionalTransform[1])
        if flags & 0x0400:
            fs.writeVector(self.axis)
        if flags & 0x0800:
            fs.writeVector(self.localCoordinate.x_axis)
            fs.writeVector(self.localCoordinate.z_axis)
        if flags & 0x2000:
            fs.writeInt(self.externalTransKey)
        if self.isIK:
            fs.writeBoneIndex(self.target)
            fs.writeInt(self.loopCount)
            fs.writeFloat(self.rotationConstraint)
            fs.writeInt(len(self.ik_links))
            for i in self.ik_links:
                i.save(fs)
class IKLink:
    """One element of an IK chain: the linked bone plus optional per-axis
    minimum/maximum angle limits (3-vectors, or None when unlimited)."""

    def __init__(self):
        self.target = None
        self.maximumAngle = None
        self.minimumAngle = None

    def __repr__(self):
        return '<IKLink target %s>'%(str(self.target))

    def load(self, fs):
        """Read the link bone index and, if the limit flag byte is 1, the
        min/max angle vectors."""
        self.target = fs.readBoneIndex()
        has_limits = (fs.readByte() == 1)
        if has_limits:
            self.minimumAngle = fs.readVector(3)
            self.maximumAngle = fs.readVector(3)
        else:
            self.minimumAngle = None
            self.maximumAngle = None

    def save(self, fs):
        """Write the link; limits are emitted only when both angle vectors
        are lists."""
        fs.writeBoneIndex(self.target)
        has_limits = (isinstance(self.minimumAngle, list)
                      and isinstance(self.maximumAngle, list))
        if has_limits:
            fs.writeByte(1)
            fs.writeVector(self.minimumAngle)
            fs.writeVector(self.maximumAngle)
        else:
            fs.writeByte(0)
class Morph:
    """Base class for all PMX morph kinds.

    Subclasses implement type_index() and load(); the shared header
    (names, category, type byte) is handled here. ``CATEGORY_OTHER`` is
    added as a correctly spelled alias of the historical ``CATEGORY_OHTER``
    typo, which is kept for backward compatibility.
    """
    CATEGORY_SYSTEM = 0
    CATEGORY_EYEBROW = 1
    CATEGORY_EYE = 2
    CATEGORY_MOUTH = 3
    CATEGORY_OHTER = 4   # historical typo, kept so existing callers keep working
    CATEGORY_OTHER = 4   # preferred, correctly spelled alias
    def __init__(self, name, name_e, category, **kwargs):
        self.offsets = []
        self.name = name
        self.name_e = name_e
        self.category = category
    def __repr__(self):
        return '<Morph name %s, name_e %s>'%(self.name, self.name_e)
    def type_index(self):
        # Subclasses return the PMX morph-type byte they serialize as.
        raise NotImplementedError
    @staticmethod
    def create(fs):
        """Factory: read the shared morph header from *fs* and dispatch to
        the concrete subclass selected by the type byte."""
        _CLASSES = {
            0: GroupMorph,
            1: VertexMorph,
            2: BoneMorph,
            3: UVMorph,
            4: UVMorph,
            5: UVMorph,
            6: UVMorph,
            7: UVMorph,
            8: MaterialMorph,
            }
        name = fs.readStr()
        name_e = fs.readStr()
        logging.debug('morph: %s', name)
        category = fs.readSignedByte()
        typeIndex = fs.readSignedByte()
        ret = _CLASSES[typeIndex](name, name_e, category, type_index = typeIndex)
        ret.load(fs)
        return ret
    def load(self, fs):
        """ Implement for loading morph data.
        """
        raise NotImplementedError
    def save(self, fs):
        """Write the shared morph header followed by every offset record."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)
        fs.writeSignedByte(self.category)
        fs.writeSignedByte(self.type_index())
        fs.writeInt(len(self.offsets))
        for i in self.offsets:
            i.save(fs)
class VertexMorph(Morph):
    """Morph that displaces vertex positions (PMX type byte 1)."""

    def __init__(self, *args, **kwargs):
        Morph.__init__(self, *args, **kwargs)

    def type_index(self):
        return 1

    def load(self, fs):
        """Append one VertexMorphOffset per record read from *fs*."""
        count = fs.readInt()
        for _ in range(count):
            entry = VertexMorphOffset()
            entry.load(fs)
            self.offsets.append(entry)
class VertexMorphOffset:
    """A (vertex index, xyz displacement) pair of a vertex morph."""

    def __init__(self):
        self.index = 0    # target vertex index
        self.offset = []  # 3-vector displacement

    def load(self, fs):
        """Read the pair from *fs*."""
        self.index = fs.readVertexIndex()
        self.offset = fs.readVector(3)

    def save(self, fs):
        """Write the pair to *fs*."""
        fs.writeVertexIndex(self.index)
        fs.writeVector(self.offset)
class UVMorph(Morph):
    """Morph acting on one of the five UV channels (PMX type bytes 3-7)."""

    def __init__(self, *args, **kwargs):
        # The UV channel (0-4) is encoded in the file as type_index - 3.
        self.uv_index = kwargs.get('type_index', 3) - 3
        Morph.__init__(self, *args, **kwargs)

    def type_index(self):
        return self.uv_index + 3

    def load(self, fs):
        """Replace the offset list with UVMorphOffset records read from *fs*."""
        self.offsets = []
        count = fs.readInt()
        for _ in range(count):
            entry = UVMorphOffset()
            entry.load(fs)
            self.offsets.append(entry)
class UVMorphOffset:
    """A (vertex index, 4-component UV delta) pair of a UV morph."""

    def __init__(self):
        self.index = 0    # target vertex index
        self.offset = []  # 4-vector UV delta

    def load(self, fs):
        """Read the pair from *fs*."""
        self.index = fs.readVertexIndex()
        self.offset = fs.readVector(4)

    def save(self, fs):
        """Write the pair to *fs*."""
        fs.writeVertexIndex(self.index)
        fs.writeVector(self.offset)
class BoneMorph(Morph):
    """Morph that moves/rotates bones (PMX type byte 2)."""

    def __init__(self, *args, **kwargs):
        Morph.__init__(self, *args, **kwargs)

    def type_index(self):
        return 2

    def load(self, fs):
        """Replace the offset list with BoneMorphOffset records read from *fs*."""
        self.offsets = []
        count = fs.readInt()
        for _ in range(count):
            entry = BoneMorphOffset()
            entry.load(fs)
            self.offsets.append(entry)
class BoneMorphOffset:
    """Per-bone payload of a bone morph: translation (3-vector) and
    rotation (4-vector, quaternion components as stored in the file)."""

    def __init__(self):
        self.index = None         # target bone index
        self.location_offset = []
        self.rotation_offset = []

    def load(self, fs):
        """Read one record from *fs*."""
        self.index = fs.readBoneIndex()
        self.location_offset = fs.readVector(3)
        self.rotation_offset = fs.readVector(4)

    def save(self, fs):
        """Write one record to *fs*."""
        fs.writeBoneIndex(self.index)
        fs.writeVector(self.location_offset)
        fs.writeVector(self.rotation_offset)
class MaterialMorph(Morph):
    """Morph that modifies material parameters (PMX type byte 8)."""

    def __init__(self, *args, **kwargs):
        Morph.__init__(self, *args, **kwargs)

    def type_index(self):
        return 8

    def load(self, fs):
        """Replace the offset list with MaterialMorphOffset records read
        from *fs*."""
        self.offsets = []
        count = fs.readInt()
        for _ in range(count):
            entry = MaterialMorphOffset()
            entry.load(fs)
            self.offsets.append(entry)
class MaterialMorphOffset:
    """Per-material payload of a material morph: an offset type
    (multiplicative or additive) followed by deltas/factors for every
    material parameter."""
    TYPE_MULT = 0
    TYPE_ADD = 1

    def __init__(self):
        self.index = 0        # target material index
        self.offset_type = 0  # TYPE_MULT or TYPE_ADD
        self.diffuse_offset = []
        self.specular_offset = []
        self.ambient_offset = []
        self.edge_color_offset = []
        self.edge_size_offset = []  # NOTE(review): loaded as a float, not a list
        self.texture_factor = []
        self.sphere_texture_factor = []
        self.toon_texture_factor = []

    def load(self, fs):
        """Read one record from *fs* in file order."""
        self.index = fs.readMaterialIndex()
        self.offset_type = fs.readSignedByte()
        self.diffuse_offset = fs.readVector(4)
        self.specular_offset = fs.readVector(4)
        self.ambient_offset = fs.readVector(3)
        self.edge_color_offset = fs.readVector(4)
        self.edge_size_offset = fs.readFloat()
        self.texture_factor = fs.readVector(4)
        self.sphere_texture_factor = fs.readVector(4)
        self.toon_texture_factor = fs.readVector(4)

    def save(self, fs):
        """Write one record to *fs* (inverse of load())."""
        fs.writeMaterialIndex(self.index)
        fs.writeSignedByte(self.offset_type)
        fs.writeVector(self.diffuse_offset)
        fs.writeVector(self.specular_offset)
        fs.writeVector(self.ambient_offset)
        fs.writeVector(self.edge_color_offset)
        fs.writeFloat(self.edge_size_offset)
        fs.writeVector(self.texture_factor)
        fs.writeVector(self.sphere_texture_factor)
        fs.writeVector(self.toon_texture_factor)
class GroupMorph(Morph):
    """Morph that combines other morphs with weights (PMX type byte 0)."""

    def __init__(self, *args, **kwargs):
        Morph.__init__(self, *args, **kwargs)

    def type_index(self):
        return 0

    def load(self, fs):
        """Replace the offset list with GroupMorphOffset records read from *fs*."""
        self.offsets = []
        count = fs.readInt()
        for _ in range(count):
            entry = GroupMorphOffset()
            entry.load(fs)
            self.offsets.append(entry)
class GroupMorphOffset:
    """A (morph index, weight factor) pair inside a group morph."""

    def __init__(self):
        self.morph = None  # index of the referenced morph
        self.factor = 0.0  # application weight

    def load(self, fs):
        """Read the pair from *fs*."""
        self.morph = fs.readMorphIndex()
        self.factor = fs.readFloat()

    def save(self, fs):
        """Write the pair to *fs*."""
        fs.writeMorphIndex(self.morph)
        fs.writeFloat(self.factor)
class Display:
    """A display frame: a named group of bone/morph entries shown in the
    viewer's outline panel. ``data`` holds (type, index) tuples where
    type 0 is a bone and type 1 is a morph."""

    def __init__(self):
        self.name = ''
        self.name_e = ''
        self.isSpecial = False
        self.data = []

    def __repr__(self):
        return '<Display name %s, name_e %s>'%(
            self.name,
            self.name_e,
            )

    def load(self, fs):
        """Read the frame header and its element list from *fs*; raises
        for an unknown element type."""
        self.name = fs.readStr()
        self.name_e = fs.readStr()
        self.isSpecial = (fs.readByte() == 1)
        count = fs.readInt()
        self.data = []
        for _ in range(count):
            disp_type = fs.readByte()
            if disp_type == 0:
                index = fs.readBoneIndex()
            elif disp_type == 1:
                index = fs.readMorphIndex()
            else:
                raise Exception('invalid value.')
            self.data.append((disp_type, index))
        logging.debug('the number of display elements: %d', len(self.data))

    def save(self, fs):
        """Write the frame header and its element list to *fs*; raises for
        an unknown element type."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)
        fs.writeByte(int(self.isSpecial))
        fs.writeInt(len(self.data))
        for disp_type, index in self.data:
            fs.writeByte(disp_type)
            if disp_type == 0:
                fs.writeBoneIndex(index)
            elif disp_type == 1:
                fs.writeMorphIndex(index)
            else:
                raise Exception('invalid value.')
class Rigid:
    """A physics rigid body: shape, placement, dynamics parameters and the
    bone it is attached to (None when unattached)."""
    TYPE_SPHERE = 0
    TYPE_BOX = 1
    TYPE_CAPSULE = 2
    MODE_STATIC = 0
    MODE_DYNAMIC = 1
    MODE_DYNAMIC_BONE = 2

    def __init__(self):
        self.name = ''
        self.name_e = ''
        self.bone = None
        self.collision_group_number = 0
        self.collision_group_mask = 0
        self.type = 0
        self.size = []
        self.location = []
        self.rotation = []
        self.mass = 1
        self.velocity_attenuation = []
        self.rotation_attenuation = []
        self.bounce = []
        self.friction = []
        self.mode = 0

    def __repr__(self):
        return '<Rigid name %s, name_e %s>'%(
            self.name,
            self.name_e,
            )

    def load(self, fs):
        """Read one rigid-body record from *fs*; a bone index of -1 in the
        file is stored as None."""
        self.name = fs.readStr()
        self.name_e = fs.readStr()
        bone_index = fs.readBoneIndex()
        self.bone = bone_index if bone_index != -1 else None
        self.collision_group_number = fs.readSignedByte()
        self.collision_group_mask = fs.readUnsignedShort()
        self.type = fs.readSignedByte()
        self.size = fs.readVector(3)
        self.location = fs.readVector(3)
        self.rotation = fs.readVector(3)
        self.mass = fs.readFloat()
        self.velocity_attenuation = fs.readFloat()
        self.rotation_attenuation = fs.readFloat()
        self.bounce = fs.readFloat()
        self.friction = fs.readFloat()
        self.mode = fs.readSignedByte()

    def save(self, fs):
        """Write one rigid-body record to *fs*; a bone of None is written
        as index -1."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)
        fs.writeBoneIndex(-1 if self.bone is None else self.bone)
        fs.writeSignedByte(self.collision_group_number)
        fs.writeUnsignedShort(self.collision_group_mask)
        fs.writeSignedByte(self.type)
        fs.writeVector(self.size)
        fs.writeVector(self.location)
        fs.writeVector(self.rotation)
        fs.writeFloat(self.mass)
        fs.writeFloat(self.velocity_attenuation)
        fs.writeFloat(self.rotation_attenuation)
        fs.writeFloat(self.bounce)
        fs.writeFloat(self.friction)
        fs.writeSignedByte(self.mode)
class Joint:
    """A 6-DOF spring joint connecting two rigid bodies; either side may
    be None (written as index -1)."""
    MODE_SPRING6DOF = 0

    def __init__(self):
        self.name = ''
        self.name_e = ''
        self.mode = 0
        self.src_rigid = None
        self.dest_rigid = None
        self.location = []
        self.rotation = []
        self.maximum_location = []
        self.minimum_location = []
        self.maximum_rotation = []
        self.minimum_rotation = []
        self.spring_constant = []
        self.spring_rotation_constant = []

    def load(self, fs):
        """Read one joint record from *fs*; rigid indices of -1 become
        None."""
        self.name = fs.readStr()
        self.name_e = fs.readStr()
        self.mode = fs.readSignedByte()
        src = fs.readRigidIndex()
        dest = fs.readRigidIndex()
        self.src_rigid = src if src != -1 else None
        self.dest_rigid = dest if dest != -1 else None
        self.location = fs.readVector(3)
        self.rotation = fs.readVector(3)
        self.minimum_location = fs.readVector(3)
        self.maximum_location = fs.readVector(3)
        self.minimum_rotation = fs.readVector(3)
        self.maximum_rotation = fs.readVector(3)
        self.spring_constant = fs.readVector(3)
        self.spring_rotation_constant = fs.readVector(3)

    def save(self, fs):
        """Write one joint record to *fs*; None rigids are written as -1."""
        fs.writeStr(self.name)
        fs.writeStr(self.name_e)
        fs.writeSignedByte(self.mode)
        fs.writeRigidIndex(self.src_rigid if self.src_rigid is not None else -1)
        fs.writeRigidIndex(self.dest_rigid if self.dest_rigid is not None else -1)
        fs.writeVector(self.location)
        fs.writeVector(self.rotation)
        fs.writeVector(self.minimum_location)
        fs.writeVector(self.maximum_location)
        fs.writeVector(self.minimum_rotation)
        fs.writeVector(self.maximum_rotation)
        fs.writeVector(self.spring_constant)
        fs.writeVector(self.spring_rotation_constant)
def load(path):
    """Load a pmx file from *path* and return the populated Model.

    The header is read first and attached to the stream via setHeader()
    so that subsequent reads know the index sizes and string encoding.
    Log-message typos from the previous revision ("form", "modlue") are
    corrected.
    """
    with FileReadStream(path) as fs:
        logging.info('****************************************')
        logging.info(' mmd_tools.pmx module')
        logging.info('----------------------------------------')
        logging.info(' Start to load model data from a pmx file')
        logging.info(' by the mmd_tools.pmx module.')
        logging.info('')
        header = Header()
        header.load(fs)
        fs.setHeader(header)
        model = Model()
        model.load(fs)
        logging.info(' Finished loading.')
        logging.info('----------------------------------------')
        logging.info(' mmd_tools.pmx module')
        logging.info('****************************************')
        return model
def save(path, model):
    # Serialize *model* to a pmx file at *path*.
    # NOTE(review): Header is constructed with *model* here while load()
    # calls Header() with no argument; Header.__init__ is defined outside
    # this chunk -- confirm it accepts an optional model argument.
    with FileWriteStream(path) as fs:
        header = Header(model)
        header.save(fs)
        # The stream needs the header for the index sizes / string encoding
        # used by model.save().
        fs.setHeader(header)
        model.save(fs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009-2010 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy, glumpy
import OpenGL.GL as gl
class Mesh(object):
    """A regular n x n grid mesh over the unit square, centered at the
    origin, with per-vertex texture coordinates, drawn as GL_QUADS.

    Bug fix: the quad index array was allocated as float32; element
    indices must be integral (they are submitted with glDrawElementsus,
    which expects unsigned shorts), so uint16 is used instead.
    """

    def __init__(self, n=64):
        # (n-1)^2 quads with 4 vertex indices each; n*n <= 65536 fits uint16.
        self.indices = numpy.zeros((n-1, n-1, 4), dtype=numpy.uint16)
        self.vertices = numpy.zeros((n, n, 3), dtype=numpy.float32)
        self.texcoords = numpy.zeros((n, n, 2), dtype=numpy.float32)
        for xi in range(n):
            for yi in range(n):
                x, y, z = xi/float(n-1), yi/float(n-1), 0
                self.vertices[xi, yi] = x-0.5, y-0.5, z
                self.texcoords[xi, yi] = x, y
        for yi in range(n-1):
            for xi in range(n-1):
                i = yi*n + xi
                # Quad corners in counter-clockwise order.
                self.indices[xi, yi] = i, i+1, i+n+1, i+n

    def draw(self):
        """Draw the grid via client-side vertex arrays."""
        gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
        gl.glEnableClientState(gl.GL_TEXTURE_COORD_ARRAY)
        gl.glVertexPointerf(self.vertices)
        gl.glTexCoordPointerf(self.texcoords)
        gl.glDrawElementsus(gl.GL_QUADS, self.indices)
        gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
        gl.glDisableClientState(gl.GL_TEXTURE_COORD_ARRAY)
if __name__ == '__main__':
    window = glumpy.Window(width=800, height=600)
    trackball = glumpy.Trackball(60, 30, 0.75)
    mesh = Mesh(64)

    # Animated height field Z = sin(X) + cos(Y) sampled on an n x n grid.
    # Bug fix: n was 64.0; numpy.empty requires integer dimensions, so a
    # float shape raises TypeError on current numpy.
    n = 64
    X = numpy.empty((n, n), dtype=numpy.float32)
    X.flat = numpy.arange(n)*2*numpy.pi/n*2
    Y = numpy.empty((n, n), dtype=numpy.float32)
    Y.flat = numpy.arange(n)*2*numpy.pi/n*2
    Y = numpy.transpose(Y)
    Z = numpy.sin(X) + numpy.cos(Y)
    I = glumpy.Image(Z, interpolation='bilinear', cmap=glumpy.colormap.Hot,
                     gridsize=(31.0, 31.0, 10.0), elevation=0.25)

    def draw_background():
        # Full-viewport vertical gradient drawn behind the surface.
        viewport = gl.glGetIntegerv(gl.GL_VIEWPORT)
        gl.glDisable(gl.GL_LIGHTING)
        gl.glDisable(gl.GL_DEPTH_TEST)
        gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
        gl.glBegin(gl.GL_QUADS)
        gl.glColor(1.0, 1.0, 0.75)
        gl.glVertex(0, 0, -1)
        gl.glVertex(viewport[2], 0, -1)
        gl.glColor(1.0, 1.0, 1.0)
        gl.glVertex(viewport[2], viewport[3], 0)
        gl.glVertex(0, viewport[3], 0)
        gl.glEnd()

    @window.event
    def on_draw():
        gl.glClearColor(1, 1, 1, 1)
        window.clear()
        draw_background()
        trackball.push()
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glTranslatef(0, 0, -0.125)
        gl.glColor4f(1, 1, 1, 1)
        I.shader.bind(I.texture, I._lut)
        mesh.draw()
        I.shader.unbind()
        trackball.pop()

    @window.event
    def on_mouse_drag(x, y, dx, dy, button):
        trackball.drag_to(x, y, dx, dy)

    @window.event
    def on_mouse_scroll(x, y, dx, dy):
        trackball.zoom_to(x, y, dx, dy)

    @window.timer(60.0)
    def update(dt):
        # Advance the phase of the height field and refresh the image.
        global X, Y
        X += numpy.pi/150.
        Y += numpy.pi/200.
        Z[...] = numpy.sin(X) + numpy.cos(Y)
        I.update()
        window.draw()

    window.mainloop()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from datetime import date, datetime, timedelta
import tempfile
import zipfile
import os
import os.path
import shutil
from django.apps import apps
from django.db import models
from django.db import transaction
from django import db
from django.core import serializers
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.db.models import Max
from configuration import get_settings
from utilities.deleting_file_field import DeletingFileField
from utilities.safeexec import execute_arglist
class Task(models.Model):
    """An assignment that users submit solutions for.

    Carries the submission window, upload constraints, grading scale,
    and helpers for running checkers, jPlag plagiarism detection, and
    exporting/importing whole task definitions as zip archives.
    """
    title = models.CharField(max_length=100, help_text = _("The name of the Task"))
    description = models.TextField(help_text = _("Description of the assignment."))
    publication_date = models.DateTimeField(help_text = _("The time on which the user will see the task."))
    submission_date = models.DateTimeField(help_text = _("The time up until the user has time to complete the task. This time will be extended by one hour for those who yust missed the deadline."))
    supported_file_types = models.CharField(max_length=1000, default ="^(text/.*|image/.*|application/pdf)$", help_text = _("Regular Expression describing the mime types of solution files that the user is allowed to upload."))
    max_file_size = models.IntegerField(default=1000, help_text = _("The maximum size of an uploaded solution file in kilobyte."))
    model_solution = models.ForeignKey('solutions.Solution', blank=True,
        null=True, related_name='model_solution_task')
    all_checker_finished = models.BooleanField(default=False, editable=False, help_text = _("Indicates whether the checker which don't run immediately on submission have been executed."))
    final_grade_rating_scale = models.ForeignKey('attestation.RatingScale', null=True, help_text = _("The scale used to mark the whole solution."))
    only_trainers_publish = models.BooleanField(default=False, help_text = _("Indicates that only trainers may publish attestations. Otherwise, tutors may publish final attestations within their tutorials."))
    jplag_up_to_date = models.BooleanField(default=False, help_text = _("No new solution uploads since the last jPlag run"))
    def __unicode__(self):
        # Python 2 display name (shown in the admin and shell).
        return self.title
    def solutions(self,user):
        """ get ALL solutions of the specified user """
        return self.solution_set.filter(author=user)
    def final_solution(self,user):
        """ get FINAL solution of specified user """
        solutions = self.solution_set.filter(author=user, final=True)
        return solutions.first()
    def expired(self):
        """Return True once the submission deadline plus the one-hour grace
        period has passed."""
        return self.submission_date + timedelta(hours=1) < datetime.now()
    def check_all_final_solutions(self):
        """Queue checker runs for all final solutions and return their count.

        Marks the task's deferred checkers as finished once the deadline
        (including grace period) has passed.
        """
        from checker.basemodels import check_multiple
        final_solutions = self.solution_set.filter(final=True)
        count = check_multiple(final_solutions, True)
        if self.expired():
            self.all_checker_finished = True
            self.save()
        return final_solutions.count()
    def get_checkers(self):
        """Collect all checker instances attached to this task, across every
        concrete Checker subclass, ordered by their `order` field."""
        from checker.basemodels import Checker
        checker_app = apps.get_app_config('checker')
        checker_classes = filter(lambda x:issubclass(x,Checker), checker_app.get_models())
        unsorted_checker = sum(map(lambda x: list(x.objects.filter(task=self)), checker_classes),[])
        checkers = sorted(unsorted_checker, key=lambda checker: checker.order)
        return checkers
    def jplag_dir_path(self):
        # Filesystem directory where jPlag output for this task is written.
        return os.path.join(settings.UPLOAD_ROOT, 'jplag', 'Task_' + unicode(self.id))
    def jplag_index_url(self):
        # Relative URL (under UPLOAD_ROOT serving) of the jPlag report index.
        return os.path.join('jplag', 'Task_' + unicode(self.id), "index.html")
    def jplag_log_url(self):
        # Relative URL of the captured jPlag log file.
        return os.path.join('jplag', 'Task_' + unicode(self.id), "jplag.txt")
    def did_jplag_run(self):
        # jPlag is considered "run" once its output directory exists.
        return os.path.isdir(self.jplag_dir_path())
    def did_jplag_succeed(self):
        # A successful run produces the report's index.html.
        return os.path.exists(os.path.join(self.jplag_dir_path(), 'index.html'))
    def need_to_re_run_jplag(self):
        # Despite the name, this only clears the up-to-date flag (it returns
        # nothing); callers apparently invoke it when new uploads invalidate
        # a previous jPlag run -- TODO confirm against call sites.
        if self.jplag_up_to_date:
            self.jplag_up_to_date = False
            self.save()
    @staticmethod
    def jplag_languages():
        """Map of supported language labels to jPlag CLI parameters and the
        file-extension filter passed via -p."""
        return { 'Java': { 'param': 'java17', 'files': '.java,.JAVA' },
                 'R': { 'param': 'text', 'files': '.R' },
                 'Python': { 'param': 'text', 'files': '.py' },
                 'Isabelle': { 'param': 'text', 'files': '.thy' },
               }
    def run_jplag(self, lang):
        """Run the jPlag plagiarism detector over all final solutions.

        Copies every final solution into a temp directory (one subdirectory
        per author), invokes the jPlag jar, writes its report next to the
        log under `jplag_dir_path()`, and marks the run up to date.
        Raises RuntimeError on misconfiguration or unknown `lang`.
        """
        # sanity check
        if not hasattr(settings,'JPLAGJAR'):
            raise RuntimeError("Setting JPLAGJAR not set")
        if not os.path.exists(settings.JPLAGJAR):
            raise RuntimeError("Setting JPLAGJAR points to non-existing file %s" % settings.JPLAGJAR)
        if not lang in self.jplag_languages():
            raise RuntimeError("Unknown jplag settings %s" % lang)
        # Remember jplag setting
        configuration = get_settings()
        configuration.jplag_setting = lang
        configuration.save()
        jplag_settings = self.jplag_languages()[lang]
        path = self.jplag_dir_path()
        tmp = os.path.join(path,"tmp")
        # clean out previous run
        if self.did_jplag_run():
            shutil.rmtree(path)
        # create output directory
        os.makedirs(path)
        # extract all final solutions
        os.mkdir(tmp)
        final_solutions = self.solution_set.filter(final=True)
        from solutions.models import path_for_user
        for solution in final_solutions:
            subpath = os.path.join(tmp, path_for_user(solution.author))
            os.mkdir(subpath)
            solution.copySolutionFiles(subpath)
        # run jplag
        args = [settings.JVM,
            "-jar", settings.JPLAGJAR,
            "-l", jplag_settings['param'],
            "-p", jplag_settings['files'],
            "-r", path,
            tmp]
        [output, error, exitcode,timed_out, oom_ed] = \
            execute_arglist(args, path, unsafe=True)
        # remove solution copies
        shutil.rmtree(tmp)
        # write log file (Python 2 `file` builtin; handle is not closed explicitly)
        file(os.path.join(path,"jplag.txt"),'w').write(output)
        # mark jplag as up-to-date
        self.jplag_up_to_date = True
        self.save()
    @classmethod
    def export_Tasks(cls, qureyset):
        """ Serializes a task queryset and related checkers to xml and bundels it with all files into a zipfile """
        # NOTE(review): parameter name "qureyset" is a typo for "queryset";
        # kept as-is since callers may pass it by keyword.
        from solutions.models import Solution, SolutionFile
        # fetch tasks, media objects, checker and serialize
        task_objects = list(qureyset)
        media_objects = list( MediaFile.objects.filter(task__in=task_objects) )
        model_solution_objects = list( Solution.objects.filter(model_solution_task__in=task_objects) )
        model_solution_file_objects = list( SolutionFile.objects.filter(solution__in=model_solution_objects) )
        from checker.basemodels import Checker
        checker_app = apps.get_app_config('checker')
        checker_classes = filter(lambda x:issubclass(x,Checker), checker_app.get_models())
        checker_objects = sum(map(lambda x: list(x.objects.filter(task__in=task_objects)), checker_classes),[])
        data = serializers.serialize("xml", task_objects + media_objects + checker_objects + model_solution_objects + model_solution_file_objects)
        # fetch files
        files = []
        for checker_object in checker_objects:
            # every FileField on the concrete checker class contributes a file
            file_fields = filter(lambda x: isinstance(x, models.FileField) , checker_object.__class__._meta.fields)
            files += map(lambda file_field: checker_object.__getattribute__(file_field.attname), file_fields)
        for media_object in media_objects:
            files.append(media_object.media_file)
        for model_solution_file_object in model_solution_file_objects:
            files.append(model_solution_file_object.file)
        # zip it up
        zip_file = tempfile.SpooledTemporaryFile()
        zip = zipfile.ZipFile(zip_file,'w')
        zip.writestr('data.xml', data)
        for file in files:
            zip.write(file.path, file.name)
        zip.close()
        zip_file.seek(0) # rewind
        return zip_file # return unclosed file-like object!?
    @classmethod
    @transaction.atomic
    def import_Tasks(cls, zip_file, solution_author):
        """Inverse of export_Tasks: deserialize data.xml from the zip, clone
        every object under fresh primary keys, re-link checkers/media/model
        solutions to the new task ids, restore bundled files, and finally
        re-check all imported model solutions. Runs in one transaction.
        """
        from solutions.models import Solution, SolutionFile
        zip = zipfile.ZipFile(zip_file,'r')
        data = zip.read('data.xml')
        task_id_map = {}
        solution_id_map = {}
        old_solution_to_new_task_map = {}
        solution_list = []
        for deserialized_object in serializers.deserialize("xml", data):
            object = deserialized_object.object
            old_id = object.id
            object.id = None
            if isinstance(object, Task):
                # save all tasks and their old id
                # publication_date pushed to date.max so imported tasks stay hidden
                object.publication_date = date.max
                deserialized_object.save()
                task_id_map[old_id] = object.id
                old_solution_to_new_task_map[object.model_solution_id] = object.id
                # model_solution is re-attached below once the Solution is imported
                object.model_solution = None
                object.final_grade_rating_scale = None
                deserialized_object.save()
            else:
                # save modelsolution, media and checker, update task id
                if isinstance(object, SolutionFile):
                    object.solution_id = solution_id_map[object.solution_id]
                else:
                    object.task_id = task_id_map[object.task_id]
                from django.core.files import File
                for file_field in filter(lambda x: isinstance(x, models.FileField) , object.__class__._meta.fields):
                    # re-create each bundled file from the zip into storage
                    file_field_instance = object.__getattribute__(file_field.attname)
                    temp_file = tempfile.NamedTemporaryFile() # autodeleted
                    temp_file.write(zip.open(file_field_instance.name).read())
                    file_field_instance.save(file_field_instance.name, File(temp_file))
                deserialized_object.save()
                if isinstance(object, Solution):
                    # reconnect the task's model_solution pointer cleared above
                    task = Task.objects.get(id=old_solution_to_new_task_map[old_id])
                    task.model_solution = object
                    task.save()
                    solution_id_map[old_id] = object.id
                    solution_list.append(object)
                    object.author = solution_author
                    object.save()
        for solution in solution_list:
            solution.check_solution(run_secret=True)
def get_mediafile_storage_path(instance, filename):
    """Upload path for a task's media file: TaskMediaFiles/Task_<pk>/<filename>."""
    return 'TaskMediaFiles/Task_{0}/{1}'.format(instance.task.pk, filename)
class MediaFile(models.Model):
    """Auxiliary file attached to a Task (e.g. an assignment sheet).

    The file is stored under TaskMediaFiles/Task_<pk>/ and deleted from
    storage when the row is removed (DeletingFileField).
    """
    task = models.ForeignKey(Task)
    media_file = DeletingFileField(upload_to=get_mediafile_storage_path, max_length=500)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: zoidberg
"""
import re
from time import time
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class MegasharesCom(SimpleHoster):
    """pyLoad hoster plugin for megashares.com (free downloads only).

    Free downloads are metered by a "download passport": the plugin may
    have to solve a captcha to reactivate the passport, checks remaining
    passport traffic, and retries when no slots or traffic are available.
    """
    __name__ = "MegasharesCom"
    __type__ = "hoster"
    __pattern__ = r"http://(\w+\.)?megashares.com/.*"
    __version__ = "0.21"
    __description__ = """megashares.com plugin - free only"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")
    # Regexes scraped against the download page HTML.
    FILE_NAME_PATTERN = '<h1 class="black xxl"[^>]*title="(?P<N>[^"]+)">'
    FILE_SIZE_PATTERN = '<strong><span class="black">Filesize:</span></strong> (?P<S>[0-9.]+) (?P<U>[kKMG])i?B<br />'
    # %d is filled with 1 (premium) or 2 (free) in handleDownload.
    DOWNLOAD_URL_PATTERN = '<div id="show_download_button_%d"[^>]*>\s*<a href="([^"]+)">'
    PASSPORT_LEFT_PATTERN = 'Your Download Passport is: <[^>]*>(\w+).*\s*You have\s*<[^>]*>\s*([0-9.]+) ([kKMG]i?B)'
    PASSPORT_RENEW_PATTERN = 'Your download passport will renew in\s*<strong>(\d+)</strong>:<strong>(\d+)</strong>:<strong>(\d+)</strong>'
    REACTIVATE_NUM_PATTERN = r'<input[^>]*id="random_num" value="(\d+)" />'
    REACTIVATE_PASSPORT_PATTERN = r'<input[^>]*id="passport_num" value="(\w+)" />'
    REQUEST_URI_PATTERN = r'var request_uri = "([^"]+)";'
    NO_SLOTS_PATTERN = r'<dd class="red">All download slots for this link are currently filled'
    FILE_OFFLINE_PATTERN = r'<dd class="red">(Invalid Link Request|Link has been deleted)'
    def setup(self):
        # Resume is always allowed; parallel downloads only for premium accounts.
        self.resumeDownload = True
        self.multiDL = True if self.premium else False
    def handlePremium(self):
        self.handleDownload(True)
    def handleFree(self):
        """Free-download flow: reactivate the passport via captcha if asked,
        verify remaining passport traffic, then start the download."""
        self.html = self.load(self.pyfile.url, decode=True)
        if self.NO_SLOTS_PATTERN in self.html:
            self.retry(wait_time = 300)
        self.getFileInfo()
        #if self.pyfile.size > 576716800: self.fail("This file is too large for free download")
        # Reactivate passport if needed
        found = re.search(self.REACTIVATE_PASSPORT_PATTERN, self.html)
        if found:
            passport_num = found.group(1)
            request_uri = re.search(self.REQUEST_URI_PATTERN, self.html).group(1)
            # Up to 5 captcha attempts before giving up.
            for i in range(5):
                random_num = re.search(self.REACTIVATE_NUM_PATTERN, self.html).group(1)
                verifyinput = self.decryptCaptcha("http://megashares.com/index.php?secgfx=gfx&random_num=%s" % random_num)
                self.logInfo("Reactivating passport %s: %s %s" % (passport_num, random_num, verifyinput))
                # Renewal endpoint mimics the site's AJAX call (rsargs + millisecond timestamp).
                url = "http://d01.megashares.com%s&rs=check_passport_renewal" % request_uri + \
                    "&rsargs[]=%s&rsargs[]=%s&rsargs[]=%s" % (verifyinput, random_num, passport_num) + \
                    "&rsargs[]=replace_sec_pprenewal&rsrnd=%s" % str(int(time()*1000))
                self.logDebug(url)
                response = self.load(url)
                if 'Thank you for reactivating your passport.' in response:
                    self.correctCaptcha()
                    self.retry(0)
                else:
                    self.invalidCaptcha()
            else: self.fail("Failed to reactivate passport")
        # Check traffic left on passport
        found = re.search(self.PASSPORT_LEFT_PATTERN, self.html)
        if not found: self.fail('Passport not found')
        self.logInfo("Download passport: %s" % found.group(1))
        data_left = float(found.group(2)) * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[found.group(3)]
        self.logInfo("Data left: %s %s (%d MB needed)" % (found.group(2), found.group(3), self.pyfile.size / 1048576))
        if not data_left:
            # No traffic: wait until the passport renews (or 600 s fallback).
            # NOTE(review): the renew expression concatenates regex strings
            # rather than summing ints -- looks buggy, confirm before relying on it.
            found = re.search(self.PASSPORT_RENEW_PATTERN, self.html)
            renew = (found.group(1) + 60 * (found.group(2) + 60 * found.group(3))) if found else 600
            self.retry(renew, 15, "Unable to get passport")
        self.handleDownload(False)
    def handleDownload(self, premium = False):
        """Locate the premium/free download button URL and download it."""
        # Find download link;
        found = re.search(self.DOWNLOAD_URL_PATTERN % (1 if premium else 2), self.html)
        msg = '%s download URL' % ('Premium' if premium else 'Free')
        if not found: self.parseError(msg)
        download_url = found.group(1)
        self.logDebug("%s: %s" % (msg, download_url))
        self.download(download_url)
getInfo = create_getInfo(MegasharesCom)
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.